-
Notifications
You must be signed in to change notification settings - Fork 8
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Generate baseline data script for operator with single input #78
base: main
Are you sure you want to change the base?
Changes from all commits
0265141
a7105c8
991a9f0
0496091
6741480
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -25,7 +25,7 @@ jobs: | |
|
||
- uses: actions/[email protected] | ||
with: | ||
node-version: '14.x' | ||
node-version: '18.x' | ||
|
||
- run: npm install | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,28 @@ | ||
How to generate test-data file for WPT tests? | ||
|
||
Step 1: Prepare a resources JSON file that defines the tests for each
operator of the WebNN API, without specifying concrete inputs and outputs
data.
|
||
Step 2: Implement generate test-data scripts | ||
|
||
Step 3: Execute the command to generate test-data files for WPT tests.
```shell
node gen-operator-with-single-input.js resources/<operator>.json
```
|
||
Take the softsign operator tests as an example:

```shell
node gen-operator-with-single-input.js resources/softsign.json
```
|
||
Then you can find two generated folders named 'test-data' and
'test-data-wpt'. The raw test data file is
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. They're? |
||
./test-data/softsign-data.json, | ||
and raw WPT test-data file as being ./test-data-wpt/softsign.json. | ||
|
||
|
||
You can manually modify some test data in
./test-data/softsign-data.json,
then re-run Step 3 to update ./test-data-wpt/softsign.json.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
'use strict';

/* eslint guard-for-in: 0 */

import path from 'path';
import {softsign} from '../../src/softsign.js';
import {gelu} from '../../src/gelu.js';
import {Tensor} from '../../src/lib/tensor.js';
import {utils} from './utils.js';

(() => {
  // Maps an operator name (derived from the resources file name) to its
  // baseline implementation. Register new single-input operators here.
  const operatorMappingDict = {
    'gelu': gelu,
    'softsign': softsign,
  };

  /**
   * Compute baseline output data for an operator taking a single input.
   * @param {String} operatorName - key into operatorMappingDict.
   * @param {Object} input - {shape: Array, data: Array} of the input tensor.
   * @param {Object} [options] - operator-specific options, forwarded as-is.
   * @returns {Array} flat output data of the computed tensor.
   * @throws {Error} when operatorName has no registered implementation.
   */
  function computeBySingleInput(operatorName, input, options = {}) {
    const operatorFunc = operatorMappingDict[operatorName];
    if (!operatorFunc) {
      throw new Error(
          `Unsupported operator '${operatorName}'. Supported operators: ` +
          Object.keys(operatorMappingDict).join(', '));
    }
    const inputTensor = new Tensor(input.shape, input.data);
    const outputTensor = operatorFunc(inputTensor, options);
    return outputTensor.data;
  }

  // Fail early with a clear usage message rather than crashing inside
  // path.basename() with an opaque TypeError when the argument is missing.
  const resourcesFile = process.argv[2];
  if (!resourcesFile) {
    throw new Error(
        'Missing argument, please specify a resources JSON file, e.g. ' +
        'node gen-operator-with-single-input.js resources/softsign.json');
  }

  const testDataFileName = path.basename(resourcesFile);
  // The operator name is the resources file name without the '.json' suffix.
  const operatorString =
      testDataFileName.slice(0, testDataFileName.indexOf('.json'));
  const savedDataFile = path.join(
      path.dirname(process.argv[1]), 'test-data',
      `${operatorString}-data.json`);
  const jsonDict = utils.readJsonFile(resourcesFile);
  const inputsDataInfo = jsonDict.inputsData;
  const inputsDataRange = jsonDict.inputsDataRange;
  // Prepare (or reuse previously saved) input data within the given range.
  const toSaveDataDict = utils.prepareInputsData(
      inputsDataInfo, savedDataFile, inputsDataRange.min, inputsDataRange.max);
  toSaveDataDict['expectedData'] = {};
  const tests = jsonDict.tests;
  // Deep copy before the loop below; wptTests entries are mutated later
  // when building the WPT conformance data.
  const wptTests = JSON.parse(JSON.stringify(tests));
  for (const test of tests) {
    console.log(`name ${test.name}`);
    const precisionDataInput = utils.getPrecisionDataFromDataDict(
        toSaveDataDict['inputsData'], test.inputs.input.data,
        test.inputs.input.type);
    const input = {shape: test.inputs.input.shape, data: precisionDataInput};
    const result = computeBySingleInput(operatorString, input, test.options);
    // Key the expected data by the placeholder name used in the resources
    // file (test.expected.data is a string placeholder at this point).
    toSaveDataDict['expectedData'][test.expected.data] =
        utils.getPrecisionData(result, test.expected.type);
  }

  utils.writeJsonFile(toSaveDataDict, savedDataFile);
  console.log(`[ Done ] Saved test data into ${savedDataFile}.`);

  const wptConformanceTestsDict = {tests: []};
  for (const test of wptTests) {
    // Update inputs data: replace string placeholders with concrete values,
    // leaving already-inlined numeric data (a number or an array whose first
    // element is a number) untouched.
    for (const inputName in test.inputs) {
      test.inputs[inputName].data =
          typeof test.inputs[inputName].data === 'number' ||
          (typeof test.inputs[inputName].data === 'object' &&
           typeof test.inputs[inputName].data[0] === 'number') ?
              test.inputs[inputName].data :
              utils.getPrecisionDataFromDataDict(
                  toSaveDataDict['inputsData'], test.inputs[inputName].data,
                  test.inputs[inputName].type);
    }
    // Update weights (scale, bias, etc.) data of options.
    if (test.options) {
      for (const optionName in test.options) {
        if (test.options[optionName].data) {
          test.options[optionName].data =
              toSaveDataDict['inputsData'][test.options[optionName].data];
        }
      }
    }
    // Update expected data with the values computed above.
    test.expected.data = toSaveDataDict['expectedData'][test.expected.data];
    wptConformanceTestsDict.tests.push(test);
  }
  const savedWPTDataFile = path.join(
      path.dirname(process.argv[1]), 'test-data-wpt', `${operatorString}.json`);
  utils.writeJsonFile(wptConformanceTestsDict, savedWPTDataFile);

  console.log('[ Done ] Generate test data file for WPT tests.');
})();
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
@@ -0,0 +1,210 @@ | ||||||
{ | ||||||
"tests": [ | ||||||
{ | ||||||
"name": "gelu float32 0D scalar", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [], | ||||||
"data": "float64DataScalar", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [], | ||||||
"data": "float32DataScalar", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float16 0D scalar", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [], | ||||||
"data": "float64DataScalar", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [], | ||||||
"data": "float16DataScalar", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float32 1D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [24], | ||||||
"data": "float64Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [24], | ||||||
"data": "float32Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float16 1D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [24], | ||||||
"data": "float64Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [24], | ||||||
"data": "float16Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float32 2D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [4, 6], | ||||||
"data": "float64Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [4, 6], | ||||||
"data": "float32Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float16 2D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [4, 6], | ||||||
"data": "float64Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [4, 6], | ||||||
"data": "float16Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float32 3D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [2, 3, 4], | ||||||
"data": "float64Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [2, 3, 4], | ||||||
"data": "float32Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float16 3D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [2, 3, 4], | ||||||
"data": "float64Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [2, 3, 4], | ||||||
"data": "float16Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float32 4D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [2, 2, 2, 3], | ||||||
"data": "float64Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [2, 2, 2, 3], | ||||||
"data": "float32Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float16 4D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [2, 2, 2, 3], | ||||||
"data": "float64Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [2, 2, 2, 3], | ||||||
"data": "float16Data", | ||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float32 5D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [2, 1, 4, 1, 3], | ||||||
"data": "float64Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [2, 1, 4, 1, 3], | ||||||
"data": "float32Data", | ||||||
"type": "float32" | ||||||
} | ||||||
}, | ||||||
{ | ||||||
"name": "gelu float16 5D tensor", | ||||||
"inputs": { | ||||||
"input": { | ||||||
"shape": [2, 1, 4, 1, 3], | ||||||
"data": "float64Data", | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
Typo? The output below is
|
||||||
"type": "float16" | ||||||
} | ||||||
}, | ||||||
"expected": { | ||||||
"name": "output", | ||||||
"shape": [2, 1, 4, 1, 3], | ||||||
"data": "float16Data", | ||||||
"type": "float16" | ||||||
} | ||||||
} | ||||||
], | ||||||
"inputsData": { | ||||||
"float64DataScalar": { | ||||||
"shape": [1], | ||||||
"type": "float64" | ||||||
}, | ||||||
"float64Data": { | ||||||
"shape": [24], | ||||||
"type": "float64" | ||||||
} | ||||||
}, | ||||||
"inputsDataRange": { | ||||||
"max": 1, | ||||||
"min": -1 | ||||||
} | ||||||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Then you ...