Unreviewed changes
Patch set 4 is the latest approved patch set.
The change was submitted with unreviewed changes in the following files:
```
The name of the file: third_party/blink/web_tests/external/wpt/webnn/conformance_tests/tensor.https.any.js
Insertions: 24, Deletions: 63.
@@ -1421,12 +1421,8 @@
// Check if WebNN interop is supported.
try {
- let mlTensor = await mlContext.createExportableTensor(
- {
- dataType: dataType,
- shape: shape,
- },
- gpuDevice);
+ let mlTensor =
+ await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
await mlContext.exportToGPU(mlTensor);
} catch (e) {
if (e.name === 'NotSupportedError') {
@@ -1438,7 +1434,7 @@
// Construct a simple graph: OUTPUT = LHS + RHS.
const mlBuilder = new MLGraphBuilder(mlContext);
- const mlOperandDescriptor = {dataType: dataType, shape};
+ const mlOperandDescriptor = {dataType, shape};
const lhsOperand = mlBuilder.input('lhs', mlOperandDescriptor);
const rhsOperand = mlBuilder.input('rhs', mlOperandDescriptor);
mlGraph = await mlBuilder.build(
@@ -1472,10 +1468,7 @@
return;
}
- const mlTensorDescriptor = {
- dataType: dataType,
- shape: shape,
- };
+ const mlTensorDescriptor = {dataType, shape};
const mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor,
gpuDevice);
@@ -1493,10 +1486,7 @@
return;
}
- const mlTensor = await mlContext.createTensor({
- dataType: dataType,
- shape: shape,
- });
+ const mlTensor = await mlContext.createTensor({dataType, shape});
await promise_rejects_js(t, TypeError, mlContext.exportToGPU(mlTensor));
}, `${testName} / export wrong tensor`);
@@ -1510,8 +1500,8 @@
const elementSize = typedArray.BYTES_PER_ELEMENT;
const shape = [maxBufferSizeOOB / elementSize];
- const mlTensor = await mlContext.createExportableTensor(
- {dataType: dataType, shape: shape}, gpuDevice);
+ const mlTensor =
+ await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
await mlContext.exportToGPU(mlTensor);
}, `${testName} / export big tensor`)
@@ -1522,7 +1512,7 @@
}
const mlTensorDescriptor =
- {dataType: dataType, shape: shape, readable: true, writable: true};
+ {dataType, shape, readable: true, writable: true};
let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice);
const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
@@ -1539,11 +1529,7 @@
return;
}
- const mlTensorDescriptor = {
- dataType: dataType,
- shape: shape,
- writable: true
- };
+ const mlTensorDescriptor = {dataType, shape, writable: true};
let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice);
@@ -1561,12 +1547,8 @@
return;
}
- const mlTensor = await mlContext.createExportableTensor(
- {
- dataType: dataType,
- shape: shape,
- },
- gpuDevice);
+ const mlTensor =
+ await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
await mlContext.exportToGPU(mlTensor);
assert_throws_js(
TypeError,
@@ -1579,12 +1561,8 @@
return;
}
- const mlTensor = await mlContext.createExportableTensor(
- {
- dataType: dataType,
- shape: shape,
- },
- gpuDevice);
+ const mlTensor =
+ await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
// Second call rejects because the first export is still pending and multiple
// exports aren't allowed.
@@ -1604,13 +1582,13 @@
// Initialize the tensor buffers from WebNN.
let mlTensorInput = await mlContext.createExportableTensor(
- {dataType: dataType, shape: shape, writable: true}, gpuDevice);
+ {dataType, shape, writable: true}, gpuDevice);
const inputData1 = new typedArray(sizeOfShape(shape)).fill(1.0);
mlContext.writeTensor(mlTensorInput, inputData1);
- let mlTensorOutput = await mlContext.createExportableTensor(
- {dataType: dataType, shape: shape}, gpuDevice);
+ let mlTensorOutput =
+ await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);
@@ -1644,13 +1622,13 @@
// Initialize the tensor buffers from WebNN.
let mlTensorInput = await mlContext.createExportableTensor(
- {dataType: dataType, shape: shape, writable: true}, gpuDevice);
+ {dataType, shape, writable: true}, gpuDevice);
const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
mlContext.writeTensor(mlTensorInput, inputData);
let mlTensorOutput = await mlContext.createExportableTensor(
- {dataType: dataType, shape: shape, readable: true}, gpuDevice);
+ {dataType, shape, readable: true}, gpuDevice);
let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);
@@ -1678,18 +1656,13 @@
// Initialize the tensor buffers from WebNN.
let mlTensorInput = await mlContext.createExportableTensor(
- {
- dataType: dataType,
- shape: shape,
- writable: true,
- },
- gpuDevice);
+ {dataType, shape, writable: true}, gpuDevice);
const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
mlContext.writeTensor(mlTensorInput, inputData);
- let mlTensorOutput = await mlContext.createExportableTensor(
- {dataType: dataType, shape: shape}, gpuDevice);
+ let mlTensorOutput =
+ await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);
@@ -1730,12 +1703,7 @@
let anotherMLContext = await navigator.ml.createContext(contextOptions);
let mlTensor = await anotherMLContext.createExportableTensor(
- {
- dataType: dataType,
- shape: shape,
- writable: true,
- },
- gpuDevice);
+ {dataType, shape, writable: true}, gpuDevice);
const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
anotherMLContext.writeTensor(mlTensor, inputData);
@@ -1758,13 +1726,7 @@
let anotherMLContext = await navigator.ml.createContext(contextOptions);
let mlTensor = await anotherMLContext.createExportableTensor(
- {
- dataType: dataType,
- shape: shape,
- readable: true,
- writable: true,
- },
- anotherGPUDevice);
+ {dataType, shape, readable: true, writable: true}, anotherGPUDevice);
const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
anotherMLContext.writeTensor(mlTensor, inputData);
@@ -1789,8 +1751,7 @@
let anotherMLContext = await navigator.ml.createContext(contextOptions);
let mlTensor = await anotherMLContext.createExportableTensor(
- {dataType: dataType, shape: shape, readable: true, writable: true},
- anotherGPUDevice);
+ {dataType, shape, readable: true, writable: true}, anotherGPUDevice);
const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
anotherMLContext.writeTensor(mlTensor, inputData);
```