diff --git a/index.bs b/index.bs index dcaace27..de96e17f 100644 --- a/index.bs +++ b/index.bs @@ -2903,6 +2903,94 @@ partial dictionary MLOpSupportLimits { 1. Return |output|. +### cumulativeSum ### {#api-mlgraphbuilder-cumulativesum} +Compute the accumulated sum of a series of values along the given axis, either including or excluding the current value. + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/cumulativeSum()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLCumulativeSumOptions}} has the following members: +
+ : exclusive + :: + Whether to include or exclude the current value in the output, meaning inclusive prefix sum or exclusive prefix sum [[Prefix-sum]]. Given input *[1,2,3,4]*, inclusive addition would yield an output of *[1,3,6,10]* whereas exclusive would yield *[0,1,3,6]*. The default is inclusive. + + : reversed + :: + Whether to reverse the summation direction along the active axis to instead start from the high coordinate to low coordinate. Given input *[1,2,3,4]*, inclusive forward addition would yield an output of *[1,3,6,10]* whereas inclusive backward summation would yield *[10,9,7,4]*. The default is forward. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - axis: an {{unsigned long}} scalar. The axis the summation will be performed on. Its value must be in the range [0, N-1] where N is *input*'s [=MLOperand/rank=]. + - options: an {{MLCumulativeSumOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/cumulativeSum()}}: +
+ : cumulativeSum + :: Support limits for operator {{MLGraphBuilder/cumulativeSum()}}. +
+ +
+ + The cumulativeSum(|input|, |axis|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-cumulativesum)), then [=exception/throw=] a {{TypeError}}. + 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "cumulativeSum" operation and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ + ### Element-wise binary operations ### {#api-mlgraphbuilder-binary} Compute the element-wise binary addition, subtraction, multiplication, division, power, maximum and minimum of the two input tensors. @@ -3078,6 +3166,9 @@ partial interface MLGraphBuilder { MLOperand equal(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); + MLOperand notEqual(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); MLOperand greater(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); @@ -3091,6 +3182,15 @@ partial interface MLGraphBuilder { MLOperand b, optional MLOperatorOptions options = {}); MLOperand logicalNot(MLOperand a, optional MLOperatorOptions options = {}); + MLOperand logicalAnd(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); + MLOperand logicalOr(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); + MLOperand logicalXor(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); }; dictionary MLLogicalNotSupportLimits { @@ -3100,15 +3200,19 @@ dictionary MLLogicalNotSupportLimits { partial dictionary MLOpSupportLimits { MLBinarySupportLimits equal; + MLBinarySupportLimits notEqual; MLBinarySupportLimits greater; MLBinarySupportLimits greaterOrEqual; MLBinarySupportLimits lesser; MLBinarySupportLimits lesserOrEqual; MLLogicalNotSupportLimits logicalNot; + MLLogicalNotSupportLimits logicalAnd; + MLLogicalNotSupportLimits logicalOr; + MLLogicalNotSupportLimits logicalXor; }; -
+
**Arguments:** - a: an {{MLOperand}}. The first input tensor. - b: an {{MLOperand}}. The second input tensor when specified. @@ -3117,7 +3221,7 @@ partial dictionary MLOpSupportLimits { **Returns:** an {{MLOperand}}. The output tensor that contains the result of element-wise comparison of the two input tensors.
- +
@@ -3156,6 +3260,8 @@ partial dictionary MLOpSupportLimits {
: equal :: Support limits for operator {{MLGraphBuilder/equal()}}. + : notEqual + :: Support limits for operator {{MLGraphBuilder/notEqual()}}. : greater :: Support limits for operator {{MLGraphBuilder/greater()}}. : greaterOrEqual @@ -3166,42 +3272,53 @@ partial dictionary MLOpSupportLimits { :: Support limits for operator {{MLGraphBuilder/lesserOrEqual()}}. : logicalNot :: Support limits for operator {{MLGraphBuilder/logicalNot()}}. + : logicalAnd + :: Support limits for operator {{MLGraphBuilder/logicalAnd()}}. + : logicalOr + :: Support limits for operator {{MLGraphBuilder/logicalOr()}}. + : logicalXor + :: Support limits for operator {{MLGraphBuilder/logicalXor()}}.
**Operation types:** - *equal*: Compare if the values of the two input tensors are equal, element-wise. + - *notEqual*: Compare if the values of the two input tensors are not equal, element-wise. - *greater*: Compare if the values of the first input tensor is greater, element-wise. - *greaterOrEqual*: Compare if the values of the first input tensor is greater or equal, element-wise. - *lesser*: Compare if the values of the first input tensor is lesser, element-wise. - *lesserOrEqual*: Compare if the values of the first input tensor is lesser or equal, element-wise. - *logicalNot*: Invert the values of the input tensor to values 0 or 1, element-wise. Specifically, when the input value is non-zero, invert it to 0. Conversely, for a zero input value, invert it to 1. + - *logicalAnd*: Compute the logical *and* of the two input tensors, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalOr*: Compute the logical *or* of the two input tensors, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalXor*: Compute the logical *xor* of the two input tensors, element-wise, treating any non-zero value as true and returning elements of 0 or 1.
-Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/lesserOrEqual()}} can each be implemented in terms of operations {{MLGraphBuilder/logicalNot()}}, {{MLGraphBuilder/lesser()}}, and {{MLGraphBuilder/greater()}} in other words `builder.greaterOrEqual(a, b)` is `builder.logicalNot(builder.lesser(a, b))`, they are specifically defined to handle NaN cases and for performance reason to avoid double comparisons. +Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/lesserOrEqual()}} can each be implemented in terms of operations {{MLGraphBuilder/logicalNot()}}, {{MLGraphBuilder/lesser()}}, and {{MLGraphBuilder/greater()}} (in other words `builder.greaterOrEqual(a, b)` is `builder.logicalNot(builder.lesser(a, b))`), they are specifically defined to handle NaN cases and for performance reason to avoid double comparisons.
To create element-wise logical operation given [=string=] |op|, {{MLOperand}} |a|, an optional {{MLOperand}} |b|, and {{MLOperatorOptions}} |options|, run the following steps: - 1. [=Assert=]: |op| is one of "equal", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot". + 1. [=Assert=]: |op| is one of "equal", "notEqual", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot", "logicalAnd", "logicalOr", "logicalXor". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If |op| is "logicalNot": - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. - 1. Otherwise: - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |a| and |b| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |b| is passed: + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |b| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |a|'s [=MLOperand/dataType=] is not equal to |b|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |a|'s [=MLOperand/shape=] and |b|'s [=MLOperand/shape=]. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Otherwise: + 1. Let |outputShape| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. + 1. If |op| is one of "logicalNot", "logicalAnd", "logicalOr", "logicalXor": + 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. 1. 
Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given {{MLOperandDataType/"uint8"}} and |outputShape|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. - 1. Let |operator| be an [=operator=] for the |op| operation, given |a| and (if |op| is not "logicalNot") |b|, and |options|. + 1. Let |operator| be an [=operator=] for the |op| operation, given |a| and (if |b| is passed) |b|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |a| and (if |op| is anything other than "logicalNot") |b|. + 1. Set |operator|'s [=operator/inputs=] to |a| and (if |b| is passed) |b|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
@@ -3217,6 +3334,13 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. Return |output|. +
+ The notEqual(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "notEqual", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+
The greater(|a|, |b|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "greater", |a|, |b|, and |options|. @@ -3251,6 +3375,27 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. 1. Return |output|.
+ +
+ The logicalAnd(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalAnd", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+ +
+ The logicalOr(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalOr", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+ +
+ The logicalXor(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalXor", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
### Element-wise unary operations ### {#api-mlgraphbuilder-unary} @@ -3268,6 +3413,7 @@ partial interface MLGraphBuilder { MLOperand neg(MLOperand input, optional MLOperatorOptions options = {}); MLOperand reciprocal(MLOperand input, optional MLOperatorOptions options = {}); MLOperand sin(MLOperand input, optional MLOperatorOptions options = {}); + MLOperand sign(MLOperand input, optional MLOperatorOptions options = {}); MLOperand sqrt(MLOperand input, optional MLOperatorOptions options = {}); MLOperand tan(MLOperand input, optional MLOperatorOptions options = {}); }; @@ -3284,12 +3430,13 @@ partial dictionary MLOpSupportLimits { MLSingleInputSupportLimits neg; MLSingleInputSupportLimits reciprocal; MLSingleInputSupportLimits sin; + MLSingleInputSupportLimits sign; MLSingleInputSupportLimits sqrt; MLSingleInputSupportLimits tan; }; -
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. @@ -3299,7 +3446,7 @@ partial dictionary MLOpSupportLimits { tensor is the same as the shape of input tensor.
-
Constraints for element-wise logical options
+
@@ -3345,6 +3492,8 @@ partial dictionary MLOpSupportLimits { :: Support limits for operator {{MLGraphBuilder/reciprocal()}}. : sin :: Support limits for operator {{MLGraphBuilder/sin()}}. + : sign + :: Support limits for operator {{MLGraphBuilder/sign()}}. : sqrt :: Support limits for operator {{MLGraphBuilder/sqrt()}}. : tan @@ -3364,6 +3513,7 @@ partial dictionary MLOpSupportLimits { - *neg*: Compute the numerical negative value of the input tensor, element-wise. - *reciprocal*: Compute the reciprocal of the input tensor, element-wise. - *sin*: Compute the sine of the input tensor, element-wise. + - *sign*: Compute the sign (-1, 0, 1) of the input tensor, element-wise, returning 1 if > 0, -1 if < 0, and 0 otherwise. - *sqrt*: Compute the square root of the input tensor, element-wise. - *tan*: Compute the tangent of the input tensor, element-wise. @@ -3372,7 +3522,7 @@ partial dictionary MLOpSupportLimits { To create element-wise unary operation given [=string=] |op|, {{MLOperand}} |input|, optional [=/list=] |allowedDataTypes|, and |options|, run the following steps: - 1. [=Assert=]: |op| is one of "abs", "ceil", "cos", "erf", "exp", "floor", "identity", "log", "neg", "reciprocal", "sin", "sqrt", "tan". + 1. [=Assert=]: |op| is one of "abs", "ceil", "cos", "erf", "exp", "floor", "identity", "log", "neg", "reciprocal", "sin", "sign", "sqrt", "tan". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |allowedDataTypes| is given and it does not [=list/contain=] |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. @@ -3466,6 +3616,13 @@ partial dictionary MLOpSupportLimits { 1. Return |output|. +
+ The sign(|input|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sign", |input|, « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}, {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"int8"}} », and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+
The sqrt(|input|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sqrt", |input|, « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}} », and |options|. @@ -3481,6 +3638,309 @@ partial dictionary MLOpSupportLimits {
+
+
+ + The behavior of the {{MLGraphBuilder/sign()}} operation can be [EMULATED] + +
+    function sign(builder, input, options) {
+      const zero = builder.constant(input.dataType, 0);
+      const positiveOne = builder.constant(input.dataType, 1);
+      const negativeOne = builder.constant(input.dataType, -1);
+
+      return builder.where(
+        builder.greater(input, zero),
+        positiveOne,
+        builder.where(
+          builder.lesser(input, zero),
+          negativeOne,
+          zero));
+    }
+    
+
+
+ + +### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} +Dequantizes an integer tensor to floating point tensor using the scale and zero-point bias, where `output = (input - zeroPoint) * scale`. The *scale* and *zeroPoint* tensors can be smaller than the *input* tensor as they are [=blockwise broadcastable=]. + + + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - scale: an {{MLOperand}}. The scale tensor to multiply each input value by after adjusting by the zero point. It must be [=blockwise broadcastable=] with the input. + - zeroPoint: an {{MLOperand}}. The zero point tensor to subtract from each input value. It has the same [=MLOperand/shape=] as the scale. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor that contains the dequantized values. +
+ +
Constraints for element-wise unary options
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/dequantizeLinear()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}{{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}}[=/any rank|N=]
{{scale}}{{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}[=/same rank as|same as=] {{input}}
{{zeroPoint}}[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
*output*[=/same type as|same as=] {{scale}}[=/same rank as|same as=] {{input}}
+ +{{MLQuantizationSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : zeroPoint + :: {{MLSupportLimits}} for zeroPoint operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/dequantizeLinear()}}: +
+ : dequantizeLinear + :: Support limits for operator {{MLGraphBuilder/dequantizeLinear()}}. +
+ +
+ + The dequantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/shape=] is not equal to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |zeroPoints|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoints|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |scale|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. + 1. Let |operator| be an [=operator=] for the "dequantizeLinear" operation, given |input|, |scale|, |zeroPoint|, and |options|. + 1. 
Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function dequantizeLinear(builder, input, scale, zeroPoint, options) {
+      // output = (input - zeroPoint) * scale
+      const floatInput = builder.cast(input, scale.dataType);
+      const floatZeroPoint = builder.cast(zeroPoint, scale.dataType);
+      const upsampledScale = blockwiseExpand(builder, scale, input.shape);
+      const upsampledZeroPoint = blockwiseExpand(builder, floatZeroPoint, input.shape);
+      return builder.mul(builder.sub(floatInput, upsampledZeroPoint), upsampledScale);
+    }
+
+    function blockwiseExpand(builder, input, targetShape) {
+      // This expands each axis by repeating the block the number of times per that axis, given the
+      // original input shape and target shape. However, backend implementations might have much more
+      // efficient upsampling operators that can accept multiple dimensions to upsample all
+      // dimensions at once by integer multiples (like tile) using nearest neighbor resampling:
+      // output = resample(scale, {sizes: input.shape})
+
+      let expandedInput = input;
+
+      for (let axis = 0; axis < input.shape.length; ++axis) {
+        const inputShape = expandedInput.shape;
+        const oldDimensionLength = inputShape[axis];
+        const newDimensionLength = targetShape[axis];
+
+        if (newDimensionLength != oldDimensionLength) {
+          // Since tile/expand can only accept repetitions of entire dimension slices (not repeating
+          // individual elements along an axis), temporarily reshape the tensor to enable them to broadcast
+          // the elements up to the full block size, utilizing an inserted dimension of size 1.
+          const elementRepeatCount = newDimensionLength / oldDimensionLength;
+          const flattenedShape = getFlattenedShapeAroundAxis(inputShape, axis);
+          const unexpandedShape = [flattenedShape[0], flattenedShape[1], 1, flattenedShape[2]];
+          const expandedShape = [flattenedShape[0], flattenedShape[1], elementRepeatCount, flattenedShape[2]];
+          const reshapedInput = builder.reshape(expandedInput, unexpandedShape);
+          expandedInput = builder.expand(reshapedInput, expandedShape);
+        }
+
+        let newInputShape = [...inputShape];
+        newInputShape[axis] = newDimensionLength;
+        expandedInput = builder.reshape(expandedInput, newInputShape);
+      }
+
+      return expandedInput;
+    }
+
+    // Compute the flattened shape before and after the given axis, yielding a 3-element list.
+    // e.g. inputShape = [2,3,4,5,6] with axis = 2 yields shape [6,4,30].
+    // e.g. inputShape = [4] with axis = 0 yields shape [1,4,1].
+    function getFlattenedShapeAroundAxis(inputShape, axis) {
+      axis = Math.max(Math.min(axis, inputShape.length - 1), 0);
+      const countBefore = inputShape.slice(0, axis).reduce((a, b) => a * b, 1);
+      const countAfter = inputShape.slice(axis + 1, inputShape.length).reduce((a, b) => a * b, 1);
+      return [countBefore, inputShape[axis], countAfter];
+    }
+    
+
+
+ + +### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear} +Quantizes a floating point tensor to integer tensor using the scale and zero-point bias, where `output = clamp(roundEven(input / scale) + zeroPoint, 0, 255)`. The *scale* and *zeroPoint* tensors can be smaller than the *input* tensor as they are [=blockwise broadcast=]. + + + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - scale: an {{MLOperand}}. The scale tensor to divide each input value before adjusting by the zero point. It must be [=blockwise broadcastable=] with the input. + - zeroPoint: an {{MLOperand}}. The zero point tensor to add to each rescaled input value. It has the same [=MLOperand/shape=] as the scale. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor that contains the quantized values. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/quantizeLinear()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}{{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}[=/any rank|N=]
{{scale}}[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
{{zeroPoint}}{{MLOperandDataType/"uint8"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int32"}}[=/same rank as|same as=] {{input}}
*output*[=/same type as|same as=] {{zeroPoint}}[=/same rank as|same as=] {{input}}
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/quantizeLinear()}}: +
+ : quantizeLinear + :: Support limits for operator {{MLGraphBuilder/quantizeLinear()}}. +
+ + +
+ + The quantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/rank=] or |zeroPoint|'s [=MLOperand/rank=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/shape=] is not equal to |zeroPoint|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |scale|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. + 1. If [=blockwise broadcasting=] |zeroPoints|'s [=MLOperand/shape=] and |input|'s [=MLOperand/shape=] returns failure, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |scale|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. If |zeroPoints|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-quantizelinear)), then [=exception/throw=] a {{TypeError}}. + 1. Let |outputDescriptor| be the result of [=creating an MLOperandDescriptor=] given |zeroPoint|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |outputDescriptor|. + 1. Let |operator| be an [=operator=] for the "quantizeLinear" operation, given |input|, |scale|, |zeroPoint|, and |options|. + 1. 
Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + + This emulation relies on a pending `roundEven` operator in [Issue webnn#817]. +
+    function quantizeLinear(builder, input, scale, zeroPoint, options) {
+      // output = clamp(roundEven(input / scale) + zeroPoint, 0, 255)
+      // Note blockwiseExpand is defined in dequantizeLinear.
+
+      const floatZeroPoint = builder.cast(zeroPoint, scale.dataType);
+      const upsampledScale = blockwiseExpand(builder, scale, input.shape);
+      const upsampledZeroPoint = blockwiseExpand(builder, floatZeroPoint, input.shape);
+      const quantizedInput = builder.roundEven(builder.div(input, upsampledScale));
+      const zeroPointAdjustedInput = builder.add(quantizedInput, upsampledZeroPoint);
+      const clampedInput = builder.clamp(zeroPointAdjustedInput, {'minValue': 0, 'maxValue': 255});
+      return builder.cast(clampedInput, zeroPoint.dataType);
+    }
+    
+
+
+ + ### elu ### {#api-mlgraphbuilder-elu} Calculate the exponential linear unit function (ELU) on the input tensor element-wise. The calculation follows the expression `max(0, x) + alpha * (exp(min(0, x)) - 1)`. @@ -3677,7 +4137,7 @@ partial dictionary MLOpSupportLimits {
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by {{MLGatherOptions/axis}}, and a negative index means indexing from the end of the dimension. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by {{MLGatherOptions/axis}}, and a negative index means indexing from the end of the dimension. - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of {{MLGraphBuilder/gather(input, indices, options)/input}} + the [=MLOperand/rank=] of {{MLGraphBuilder/gather(input, indices, options)/indices}} - 1. @@ -3736,7 +4196,7 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gather)), then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |input|'s [=MLOperand/rank=]. 1. 
Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. @@ -3810,16 +4270,313 @@ partial dictionary MLOpSupportLimits { // [32, 31, 31]] const output2 = builder.gather(input, indices2, {axis: 1}); - // axis = 1 + // axis = 1 + // indices of shape [2,2]: + // [[0, 1], + // [1, 2]] + // output of shape [4,2,2]: + // [[[ 0, 1], [ 1, 2]], + // [[10, 11], [11, 12]], + // [[20, 21], [21, 22]], + // [[30, 31], [31, 32]]] + const output3 = builder.gather(input, indices3, {axis: 1}); + + +
+ +### gatherElements ### {#api-mlgraphbuilder-gatherelements} +Gather values of the input tensor along an axis according to the indices. + + + +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*. +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/gatherElements()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]1 to [=/any rank|N=]
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}*input*'s [=MLOperand/rank=]
*output*[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=]
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherElements()}}: +
+ : gatherElements + :: Support limits for operator {{MLGraphBuilder/gatherElements()}}. +
+ +
+ The {{MLGraphBuilder/gatherElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gatherElements(|input|, |indices|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-gatherelements)), then [=exception/throw=] a {{TypeError}}. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. Let |expectedShapeIndices| be a copy of |input|'s [=MLOperand/shape=]. + 1. Set |expectedShapeIndices|[|axis|] to |indices|'s [=MLOperand/shape=][|axis|]. + 1. If |indices|'s [=MLOperand/shape=] is not equal to |expectedShapeIndices|, then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |indices|'s [=MLOperand/shape=]. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "gatherElements" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gatherElements works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    // indices of shape [2,3]:
+    //   [[3, 1, 1],
+    //    [2, 0, 3]]
+    // axis = 0 (default)
+    // output of shape [2,3]:
+    //   [[30, 11, 12],
+    //    [20,  1, 32]]
+
+    const input1 = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2, 3]},
+      new Uint32Array([3, 1, 1, 2, 0, 3]));
+
+    const output1 = builder.gatherElements(input1, indices1);
+
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    // indices of shape [4,1]:
+    //   [[2],
+    //    [1],
+    //    [0],
+    //    [2]],
+    // axis = 1
+    // output of shape [4,1]:
+    //   [[ 2],
+    //    [11],
+    //    [20],
+    //    [32]]
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [4, 1]},
+      new Uint32Array([2, 1, 0, 2]));
+
+    const output2 = builder.gatherElements(input1, indices2, {axis: 1});
+
+    // input of shape [4,2,2]:
+    //   [[[  0,   1],
+    //     [ 10,  11]],
+    //    [[100, 101],
+    //     [110, 111]],
+    //    [[200, 201],
+    //     [210, 211]],
+    //    [[300, 301],
+    //     [310, 311]],]
+    // indices of shape [1,2,2]:
+    //   [[[0, 2],
+    //     [1, 3]]],
+    // axis = 0 (default)
+    // output of shape [1,2,2]:
+    //   [[[  0, 201],
+    //     [110, 311]]]
+
+    const input2 = builder.constant(
+      {shape: [4, 2, 2]},
+      new Float32Array([0, 1, 10, 11, 100, 101, 110, 111, 200, 201, 210, 211, 300, 301, 310, 311]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [1, 2, 2]},
+      new Uint32Array([0, 2, 1, 3]));
+
+    const output3 = builder.gatherElements(input2, indices3);
+  
+
+
+ +### gatherNd ### {#api-mlgraphbuilder-gathernd} +Gather slices of the input tensor according to the indices. + + + +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices array contains entire coordinates into the input tensor, with the rightmost dimension holding the number of dimensions per coordinate. So an indices tensor of shape [10,1] holds 10 single-axis indices, and a shape of [4,3] holds 4 indices of 3D coordinates. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and each must be in the range -N (inclusive) to N (exclusive) where N is the size of the corresponding input dimension, and a negative index means indexing from the end of the corresponding dimension. + - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1. +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/gatherNd()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]1 to [=/any rank|N=]
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}1 to [=/any rank|N=]
*output*[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}: +
+ : gatherNd + :: Support limits for operator {{MLGraphBuilder/gatherNd()}}. +
+ +
+ The {{MLGraphBuilder/gatherNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gatherNd(|input|, |indices|, |options|) method steps are: + + TODO: +
+ +
+
+ + Examples of how gatherNd works in different slicing schemes. + +
+    // input of shape [2,2]:
+    //   [[0, 1],
+    //    [2, 3]]
+    // indices of shape [2,2]:
+    //   [[0, 0],
+    //    [1, 1]]
+    // output of shape [2]:
+    //   [0, 3]
+
+    const input1 = builder.constant(
+      {shape: [2, 2]},
+      new Float32Array([0, 1, 2, 3]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]},
+      new Uint32Array([0, 0, 1, 1]));
+
+    const output1 = builder.gatherNd(input1, indices1);
+
+    // input of shape [2,2]:
+    //   [[0, 1],
+    //    [2, 3]]
+    // indices of shape [2,1]:
+    //   [[1],
+    //    [0]]
+    // output of shape [2,2]:
+    //   [[2, 3],
+    //    [0, 1]]
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [2, 1]},
+      new Uint32Array([1, 0]));
+
+    const output2 = builder.gatherNd(input1, indices2);
+
+    // input of shape [2,2,2]:
+    //   [[[0, 1],
+    //     [2, 3]],
+    //    [[4, 5],
+    //     [6, 7]]]
     // indices of shape [2,2]:
     //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    //    [1, 0]]
+    // output of shape [2,2]:
+    //   [[2, 3],
+    //    [4, 5]]
+
+    const input2 = builder.constant(
+      {shape: [2, 2, 2]},
+      new Float32Array([0, 1, 2, 3, 4, 5, 6, 7]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]},
+      new Uint32Array([0, 1, 1, 0]));
+
+    const output3 = builder.gatherNd(input2, indices3);
   
@@ -7167,8 +7924,290 @@ partial dictionary MLOpSupportLimits { The default value is [2, 3]. - - +
Constraints for {{MLGraphBuilder/resample2d()}}
+ + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/resample2d()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}{{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}4
*output*[=/same type as|same as=] {{input}}4
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/resample2d()}}: +
+ : resample2d + :: Support limits for operator {{MLGraphBuilder/resample2d()}}. +
+ +
+ + The resample2d(|input|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-resample2d)), then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/rank=] is not its [=/allowed rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLResample2dOptions/scales}} does not [=map/exist=], set it to the [=/list=] « 1.0, 1.0 ». + 1. Otherwise, if any of its values is not greater than 0, or if its [=list/size=] is not 2, then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLResample2dOptions/sizes}} [=map/exists=], and if its size is not 2, or if any of its values is not greater than 0, then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLResample2dOptions/axes}} does not [=map/exists=], set it to the [=/list=] « 2, 3 ». + 1. Otherwise, if |options|.{{MLResample2dOptions/axes}} contains duplicate values, or if any of its elements is not in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive, then [=exception/throw=] a {{TypeError}}. + 1. *Calculate the output shape:* + 1. Let |inputDescriptor| be |input|.{{MLOperand/[[descriptor]]}}. + 1. Let |outputShape| be a [=list/clone=] of |inputDescriptor|.{{MLOperandDescriptor/shape}}. + 1. [=list/For each=] |index| in [=the range=] 0 to |options|.{{MLResample2dOptions/axes}}'s [=list/size=], exclusive: + 1. If |options|.{{MLResample2dOptions/sizes}} [=map/exists=], then let |size| be |options|.{{MLResample2dOptions/sizes}}[|index|]. + 1. Otherwise, let |size| be floor(|input|'s [=MLOperand/shape=][|options|.{{MLResample2dOptions/axes}}[|index|]] * |options|.{{MLResample2dOptions/scales}}[|index|]). + 1. 
If |size| is not a [=valid dimension=], then return failure. + 1. Set |outputShape|[|options|.{{MLResample2dOptions/axes}}[|index|]] to |size|. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |inputDescriptor|.{{MLOperandDescriptor/dataType}} and |outputShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "resample2d" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ + +
+ The specific sampling algorithms are based on those widely used in existing Machine Learning frameworks. For example, when performing {{MLInterpolationMode/linear}} resampling from the following *[4, 4]* input tensor (considering only spatial dimensions): + + ``` + [ 0 1 2 3 ] + [ 0 1 2 3 ] + [ 12 13 14 15 ] + [ 12 13 14 15 ] + ``` + + For an *[8, 8]* output tensor, the expected values are: + + ``` + [ 0 0.25 0.75 1.25 1.75 2.25 2.75 3 ] + [ 0 0.25 0.75 1.25 1.75 2.25 2.75 3 ] + [ 0 0.25 0.75 1.25 1.75 2.25 2.75 3 ] + [ 3 3.25 3.75 4.25 4.75 5.25 5.75 6 ] + [ 9 9.25 9.75 10.25 10.75 11.25 11.75 12 ] + [ 12 12.25 12.75 13.25 13.75 14.25 14.75 15 ] + [ 12 12.25 12.75 13.25 13.75 14.25 14.75 15 ] + [ 12 12.25 12.75 13.25 13.75 14.25 14.75 15 ] + ``` + + This has the convenient properties that the sampling is evenly distributed, symmetric, robust to image mirroring, and the corner values are aligned. +
+ +### reshape ### {#api-mlgraphbuilder-reshape-method} +Alter the shape of a tensor to a new shape. Reshape does not copy or change the content of the tensor. It just changes the tensor's logical shape for the subsequent operations. + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - newShape: [=sequence=]<{{unsigned long}}>. The shape of the output tensor. + The number of elements implied by {{MLGraphBuilder/reshape(input, newShape, options)/newShape}} must be the same as the + number of elements in the input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor. The values of the output + tensor are the same as values of the input tensor. The shape of the output + tensor is specified by {{MLGraphBuilder/reshape(input, newShape, options)/newShape}}. +
+ + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/reshape()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}{{newShape}}'s [=list/size=]
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +
+ : reshape + :: Support limits for operator {{MLGraphBuilder/reshape()}}. +
+ +
+ + The reshape(|input|, |newShape|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be an empty array of {{unsigned long}}. + 1. If |newShape|'s [=list/size=] is 0, set |outputShape| to an empty [=/list=] for a scalar. + 1. If any [=list/item=] in |newShape| is not a [=valid dimension=], then [=exception/throw=] a {{TypeError}}. + 1. Let |inputElementCount| be the product of all elements in |input|'s [=MLOperand/shape=]. Empty dimensions yield an |inputElementCount| of 1. + 1. If product of all values in |newShape| is not equal to |inputElementCount|, then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be a copy of |input|.{{MLOperand/[[descriptor]]}}. + 1. Set |desc|.{{MLOperandDescriptor/shape}} to |newShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "reshape" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +### reverse ### {#api-mlgraphbuilder-reverse-method} +Reverse a tensor along the given axes. + + + +{{MLReverseOptions}} has the following members: +
+ : axes + :: + The indices to the input dimensions to reverse. When this member is not present, it is treated as if all dimensions are reversed. If explicitly passed as empty, no dimensions are reversed. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLReverseOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/reverse()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reverse()}}: +
+ : reverse + :: Support limits for operator {{MLGraphBuilder/reverse()}}. +
+ +
+ + The reverse(|input|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-reverse)), then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLReverseOptions/axes}} does not [=map/exist=], set it to [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive. + 1. Otherwise, if |options|.{{MLReverseOptions/axes}} contains duplicate values, or if any of its elements is not in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive, then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "reverse" operation and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +### scatterElements ### {#api-mlgraphbuilder-scatterelements} +Scatter values from the updates tensor atop a copy of the input tensor the along an axis according to the indices. + + + +{{MLScatterOptions}} has the following members: +
+ : axis + :: + The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor to initialize the output with. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter over. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - updates: an {{MLOperand}}. New values to replace atop the input. + - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to *input*'s [=MLOperand/rank=]. +
+ + + @@ -7178,108 +8217,212 @@ partial dictionary MLOpSupportLimits { - - + + + + + + + + + + + + - +
Constraints for {{MLGraphBuilder/scatterElements()}}
operand
{{input}}{{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}4[=/any data type|any=]1 to [=/any rank|N=]
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}{{input}}'s [=MLOperand/rank=]
{{updates}}[=/same type as|same as=] {{input}}{{input}}'s [=MLOperand/rank=] and {{indices}}'s [=MLOperand/shape=]
*output* [=/same type as|same as=] {{input}}4{{input}}'s [=MLOperand/rank=]
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/resample2d()}}: +{{MLScatterSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : updates + :: {{MLSupportLimits}} for updates operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterElements()}}:
- : resample2d - :: Support limits for operator {{MLGraphBuilder/resample2d()}}. + : scatterElements + :: Support limits for operator {{MLGraphBuilder/scatterElements()}}.
+
+ The {{MLGraphBuilder/scatterElements(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+
- The resample2d(|input|, |options|) method steps are: + The scatterElements(|input|, |indices|, |updates|, |options|) method steps are: 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-resample2d)), then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/rank=] is not its [=/allowed rank=], then [=exception/throw=] a {{TypeError}}. - 1. If |options|.{{MLResample2dOptions/scales}} does not [=map/exist=], set it to the [=/list=] « 1.0, 1.0 ». - 1. Otherwise, if any of its values is not greater than 0, or if its [=list/size=] is not 2, then [=exception/throw=] a {{TypeError}}. - 1. If |options|.{{MLResample2dOptions/sizes}} [=map/exists=], and if its size is not 2, or if any of its values is not greater than 0, then [=exception/throw=] a {{TypeError}}. - 1. If |options|.{{MLResample2dOptions/axes}} does not [=map/exists=], set it to the [=/list=] « 2, 3 ». - 1. Otherwise, if |options|.{{MLResample2dOptions/axes}} contains duplicate values, or if any of its elements is not in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive, then [=exception/throw=] a {{TypeError}}. - 1. *Calculate the output shape:* - 1. Let |inputDescriptor| be |input|.{{MLOperand/[[descriptor]]}}. - 1. Let |outputShape| be a [=list/clone=] of |inputDescriptor|.{{MLOperandDescriptor/shape}}. - 1. [=list/For each=] |index| in [=the range=] 0 to |options|.{{MLResample2dOptions/axes}}'s [=list/size=], exclusive: - 1. If |options|.{{MLResample2dOptions/sizes}} [=map/exists=], then let |size| be |options|.{{MLResample2dOptions/sizes}}[|index|]. - 1. 
Otherwise, let |size| be floor(|input|'s [=MLOperand/shape=][|options|.{{MLResample2dOptions/axes}}[|index|]] * |options|.{{MLResample2dOptions/scales}}[|index|]). - 1. If |size| is not a [=valid dimension=], then return failure. - 1. Set |outputShape|[|options|.{{MLResample2dOptions/axes}}[|index|]] to |size|. - 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |inputDescriptor|.{{MLOperandDescriptor/dataType}} and |outputShape|. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| and |updates| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-scatterelements)), then [=exception/throw=] a {{TypeError}}. + 1. If |updates|'s [=MLOperand/dataType=] is not equal to |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |axis| be |options|.{{MLScatterOptions/axis}}. + 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. Let |expectedShapeIndices| be a copy of |input|'s [=MLOperand/shape=]. + 1. Set |expectedShapeIndices|[|axis|] to |indices|'s [=MLOperand/shape=][|axis|]. + 1. If |indices|'s [=MLOperand/shape=] is not equal to |expectedShapeIndices|, then [=exception/throw=] a {{TypeError}}. + 1. If |updates|'s [=MLOperand/shape=] is not equal to |indices|'s [=MLOperand/shape=], then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |input|'s [=MLOperand/shape=]. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. - 1. Let |operator| be an [=operator=] for the "resample2d" operation, given |options|. + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. 
Let |operator| be an [=operator=] for the "scatterElements" operation, given |input|, |indices|, |updates|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/inputs=] to |input|, |indices|, and |updates|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
+
+
+ + Examples of how scatterElements works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    // indices of shape [2,3]:
+    //   [[3, 1, 1],
+    //    [2, 0, 3]]
+    // updates of shape [2,3]:
+    //   [[-1, -2, -3],
+    //    [-4, -5, -6]]
+    // axis = 0 (default)
+    // output of shape [4,3]:
+    //   [[ 0, -5,  2],
+    //    [10, -2, -3],
+    //    [-4, 21, 22],
+    //    [-1, 31, -6]]
+
+    const input1 = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
 
-
- The specific sampling algorithms are based on those widely used in existing Machine Learning frameworks. For example, when performing {{MLInterpolationMode/linear}} resampling from the following *[4, 4]* input tensor (considering only spatial dimensions): + const indices1 = builder.constant( + {dataType: 'uint32', shape: [2, 3]}, + new Uint32Array([3, 1, 1, 2, 0, 3])); - ``` - [ 0 1 2 3 ] - [ 0 1 2 3 ] - [ 12 13 14 15 ] - [ 12 13 14 15 ] - ``` + const updates1 = builder.constant( + {dataType: 'float32', shape: [2, 3]}, + new Uint32Array([-1, -2, -3, -4, -5, -6])); - For an *[8, 8]* output tensor, the expected values are: + const output1 = builder.scatterElements(input1, indices1, updates1); - ``` - [ 0 0.25 0.75 1.25 1.75 2.25 2.75 3 ] - [ 0 0.25 0.75 1.25 1.75 2.25 2.75 3 ] - [ 0 0.25 0.75 1.25 1.75 2.25 2.75 3 ] - [ 3 3.25 3.75 4.25 4.75 5.25 5.75 6 ] - [ 9 9.25 9.75 10.25 10.75 11.25 11.75 12 ] - [ 12 12.25 12.75 13.25 13.75 14.25 14.75 15 ] - [ 12 12.25 12.75 13.25 13.75 14.25 14.75 15 ] - [ 12 12.25 12.75 13.25 13.75 14.25 14.75 15 ] - ``` + // input of shape [4,3]: + // [[ 0, 1, 2], + // [10, 11, 12], + // [20, 21, 22], + // [30, 31, 32]] + // indices of shape [4,1]: + // [[2], + // [1], + // [0], + // [2]], + // updates of shape [4,1]: + // [[-1], + // [-2], + // [-3], + // [-4]], + // axis = 1 + // output of shape [4,3]: + // [[ 0, 1, -1], + // [10, -2, 12], + // [-3, 21, 22], + // [30, 31, -4]] - This has the convenient properties that the sampling is evenly distributed, symmetric, robust to image mirroring, and the corner values are aligned. 
+ const indices2 = builder.constant( + {dataType: 'uint32', shape: [4, 1]}, + new Uint32Array([2, 1, 0, 2])); + + const updates2 = + builder.constant({dataType: 'float32', shape: [4, 1]}, + new Uint32Array([-1, -2, -3, -4])); + + const output2 = builder.scatterElements(input1, indices2, updates2, {axis: 1}); + + // input of shape [4,2,2]: + // [[[ 0, 1], + // [ 10, 11]], + // [[100, 101], + // [110, 111]], + // [[200, 201], + // [210, 211]], + // [[300, 301], + // [310, 311]],] + // indices of shape [1,2,2]: + // [[[0, 2], + // [1, 3]]], + // updates of shape [1,2,2]: + // [[[-1, -2], + // [-3, -4]]], + // axis = 2 + // output of shape [4,2,2]: + // [[[ -1, 1], + // [ 10, 11]], + // [[100, 101], + // [ -3, 111]], + // [[200, -2], + // [210, 211]], + // [[300, 301], + // [310, -4]],] + const input2 = builder.constant( + {shape: [4, 2, 2]}, + new Float32Array([0, 1, 10, 11, 100, 101, 110, 111, 200, 201, 210, 211, 300, 301, 310, 311])); + + const indices3 = builder.constant( + {dataType: 'uint32', shape: [1, 2, 2]}, + new Uint32Array([0, 2, 1, 3])); + + const updates3 = + builder.constant({dataType: 'float32', shape: [1, 2, 2]}, + new Uint32Array([-1, -2, -3, -4])); + + const output3 = builder.scatterElements(input2, indices3, updates3, {axis: 2}); +
+
-### reshape ### {#api-mlgraphbuilder-reshape-method} -Alter the shape of a tensor to a new shape. Reshape does not copy or change the content of the tensor. It just changes the tensor's logical shape for the subsequent operations. +### scatterNd ### {#api-mlgraphbuilder-scatternd} +Scatter slices of values from the update tensor atop a copy of the input tensor according to the indices. + -
+ +
**Arguments:** - input: an {{MLOperand}}. The input tensor. - - newShape: [=sequence=]<{{unsigned long}}>. The shape of the output tensor. - The number of elements implied by {{MLGraphBuilder/reshape(input, newShape, options)/newShape}} must be the same as the - number of elements in the input tensor. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor to initialize the output with. + - indices: an {{MLOperand}}. The indices array contains entire coordinates into the output tensor, with the rightmost dimension holding the number of dimensions per coordinate. So an indices tensor of shape [10,1] holds 10 single-axis indices, and a shape of [4,3] holds 4 indices of 3D coordinates. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, or {{MLOperandDataType/"int64"}}, and each must be in the range -N (inclusive) to N (exclusive) where N is the size of the corresponding output dimension, and a negative index means indexing from the end of the corresponding dimension. + - updates: an {{MLOperand}}. New values to replace atop the input. + - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output tensor. The values of the output - tensor are the same as values of the input tensor. The shape of the output - tensor is specified by {{MLGraphBuilder/reshape(input, newShape, options)/newShape}}. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to *input*'s [=MLOperand/rank=].
- - +
Constraints for {{MLGraphBuilder/reshape()}}
+ @@ -7290,42 +8433,162 @@ partial dictionary MLOpSupportLimits { - + + + + + + + + + + + - +
Constraints for {{MLGraphBuilder/scatterNd()}}
operand
{{input}} [=/any data type|any=][=/any rank|N=]1 to [=/any rank|N=]
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}1 to [=/any rank|N=]
{{updates}}[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1
*output* [=/same type as|same as=] {{input}}{{newShape}}'s [=list/size=]1 to [=/any rank|N=]
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}:
- : reshape - :: Support limits for operator {{MLGraphBuilder/reshape()}}. + : scatterNd + :: Support limits for operator {{MLGraphBuilder/scatterNd()}}.
+
+ The {{MLGraphBuilder/scatterNd(input, indices, updates, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+
- The reshape(|input|, |newShape|, |options|) method steps are: + The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: - 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be an empty array of {{unsigned long}}. - 1. If |newShape|'s [=list/size=] is 0, set |outputShape| to an empty [=/list=] for a scalar. - 1. If any [=list/item=] in |newShape| is not a [=valid dimension=], then [=exception/throw=] a {{TypeError}}. - 1. Let |inputElementCount| be the product of all elements in |input|'s [=MLOperand/shape=]. Empty dimensions yield an |inputElementCount| of 1. - 1. If product of all values in |newShape| is not equal to |inputElementCount|, then [=exception/throw=] a {{TypeError}}. - 1. Let |desc| be a copy of |input|.{{MLOperand/[[descriptor]]}}. - 1. Set |desc|.{{MLOperandDescriptor/shape}} to |newShape|. - 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. - 1. Let |operator| be an [=operator=] for the "reshape" operation, given |options|. - 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/input=] to |input|. - 1. Set |operator|'s [=operator/output=] to |output|. - 1. Return |output|. + TODO: +
+ +
+
+ + Examples of how scatterNd works in different slicing schemes. + +
+    // input of shape [8]:
+    //   [0, 1, 2, 3, 4, 5, 6, 7]
+    // indices of shape [4, 1]:
+    //   [[4],
+    //    [3],
+    //    [1],
+    //    [7]]
+    // updates of shape [4]:
+    //   [-1, -2, -3, -4]
+    // output of shape [8]:
+    //   [0, -3, 2, -2, -1, 5, 6, -4]
+
+    const input1 = builder.constant(
+      {shape: [8]},
+      new Float32Array([0, 1, 2, 3, 4, 5, 6, 7]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [4, 1]},
+      new Uint32Array([4, 3, 1, 7]));
+
+    const updates1 = builder.constant(
+      {shape: [4]},
+      new Float32Array([-1, -2, -3, -4]));
+
+    const output1 = builder.scatterNd(input1, indices1, updates1);
+
+    // input of shape [2,2]:
+    //   [[0, 1],
+    //    [2, 3]]
+    // indices of shape [2,2]:
+    //   [[0, 0],
+    //    [1, 1]]
+    // updates of shape [2]:
+    //   [-1, -2]
+    // output of shape [2,2]:
+    //   [[-1,  1],   <= -1 written to element coordinate [0, 0]
+    //    [ 2, -2]]   <= -2 written to element coordinate [1, 1]
+
+    const input2 = builder.constant(
+      {shape: [2, 2]},
+      new Float32Array([0, 1, 2, 3]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]},
+      new Uint32Array([0, 0, 1, 1]));
+
+    const updates2 = builder.constant(
+      {shape: [2]},
+      new Float32Array([-1, -2]));
+
+    const output2 = builder.scatterNd(input2, indices2, updates2);
+
+    // input of shape [3,2]:
+    //   [[0, 1],
+    //    [2, 3],
+    //    [4, 5]]
+    // indices of shape [2,1]:
+    //   [[2],
+    //    [0]]
+    // updates of shape [2,2]:
+    //   [[-1, -2],
+    //    [-3, -4]]
+    // output of shape [3,2]:
+    //   [[-3 ,-4],    <= [-3, -4] written to element coordinate [0, *]
+    //    [ 2,  3],
+    //    [-1, -2]]    <= [-1, -2] written to element coordinate [2, *]
+
+    const input3 = builder.constant(
+      {shape: [3, 2]},
+      new Float32Array([0, 1, 2, 3, 4, 5]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 1]},
+      new Uint32Array([2, 0]));
+
+    const updates3 = builder.constant(
+      {shape: [2, 2]},
+      new Float32Array([-1, -2, -3, -4]));
+
+    const output3 = builder.scatterNd(input3, indices3, updates3);
+
+    // input of shape [2,2,2]:
+    //   [[[0, 1],
+    //     [2, 3]],
+    //    [[4, 5],
+    //     [6, 7]]]
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 0]]
+    // updates of shape [2,2]:
+    //   [[-1, -2],
+    //    [-3, -4]]
+    // output of shape [2,2,2]:
+    //   [[[ 0,  1],
+    //     [-1, -2]],   <= [-1, -2] written to element coordinate [0, 1, *]
+    //    [[-3, -4],    <= [-3, -4] written to element coordinate [1, 0, *]
+    //     [ 6,  7]]]
+
+    const input4 = builder.constant(
+      {shape: [2, 2, 2]},
+      new Float32Array([0, 1, 2, 3, 4, 5, 6, 7]));
+
+    const indices4 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]},
+      new Uint32Array([0, 1, 1, 0]));
+
+    const updates4 = builder.constant(
+      {shape: [2, 2]},
+      new Float32Array([-1, -2, -3, -4]));
+
+    const output4 = builder.scatterNd(input4, indices4, updates4);
+  
+
### sigmoid ### {#api-mlgraphbuilder-sigmoid-method} @@ -7411,23 +8674,39 @@ partial dictionary MLOpSupportLimits { ### slice ### {#api-mlgraphbuilder-slice} Produce a slice of the input tensor. + +{{MLSliceOptions}} has the following members: +
+    : strides
+    ::
+        The stride to step over each input along each axis.
+        The length of the strides array must equal the [=MLOperand/rank=] of the input tensor.
+        The default is an array of length [=MLOperand/rank=] consisting of all 1's.
+        e.g. [1,1,1] for a 3-D tensor.
+        Strides must be greater than zero.
+
    **Arguments:**
    - input: an {{MLOperand}}. The input tensor.
-    - starts: [=sequence=]<{{unsigned long}}>. The starting index to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of {{MLGraphBuilder/slice(input, starts, sizes, options)/input}}, {{MLGraphBuilder/slice(input, starts, sizes, options)/starts}}[*d*] indicates the starting index to slice in that dimension. The starting index must be in the range [0, input size - 1] in that dimension.
-    - sizes: a [=sequence=]<{{unsigned long}}>. The number of elements to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of {{MLGraphBuilder/slice(input, starts, sizes, options)/input}}, {{MLGraphBuilder/slice(input, starts, sizes, options)/sizes}}[*d*] indicates the number of elements to slice in that dimension. The size must not be 0 and must satisfy the constraint `starting index + size <= input size` in that dimension.
-    - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation.
+    - starts: a [=sequence=]<{{unsigned long}}>. The starting index to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of {{MLGraphBuilder/slice(input, starts, sizes, options)/input}}, {{MLGraphBuilder/slice(input, starts, sizes, options)/starts}}[*d*] indicates the starting index to slice in that dimension. The starting index must be in the range [0, input size - 1] in that dimension.
+    - sizes: a [=sequence=]<{{unsigned long}}>. The number of elements to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of {{MLGraphBuilder/slice(input, starts, sizes, options)/input}}, {{MLGraphBuilder/slice(input, starts, sizes, options)/sizes}}[*d*] indicates the number of elements to slice in that dimension. 
The size must not be 0 and must satisfy the constraint `starting index + size <= input size` in that dimension. + - options: an {{MLSliceOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor of the same rank as the input tensor with tensor values stripped to the specified starting and ending indices in each dimension.
@@ -7467,6 +8746,8 @@ partial dictionary MLOpSupportLimits { 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If any of |sizes|'s [=list/items=] are 0, then [=exception/throw=] a {{TypeError}}. 1. If |starts|'s [=list/size=] and |sizes|'s [=list/size=] are not both equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]: + 1. If |options|.{{MLSliceOptions/strides}}'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. [=list/For each=] |index| in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive: 1. If |sizes|[|index|] is 0, then [=exception/throw=] a {{TypeError}}. @@ -7474,6 +8755,8 @@ partial dictionary MLOpSupportLimits { 1. If |starts|[|index|] is greater than or equal to |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. 1. If |starts|[|index|] + |sizes|[|index|] is greater than |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]: + 1. If |options|.{{MLSliceOptions/strides}}[|index|] is less than 1, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. 1. Let |operator| be an [=operator=] for the "slice" operation, given |starts|, |sizes|, and |options|. @@ -7949,6 +9232,77 @@ partial dictionary MLOpSupportLimits {
+### tile ### {#api-mlgraphbuilder-tile} +Repeat a tensor the given number of times along each dimension. + + + +
+    **Arguments:**
+    - input: an {{MLOperand}}. The input N-D tensor.
+    - repetitions: [=sequence=]<{{unsigned long}}>. A count per dimension of how many times to repeat that dimension. The |repetitions| [=list/size=] must match the |input|'s [=MLOperand/rank=], using 1's for any axis that should retain the same size.
+    - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation.
+
+    **Returns:** an {{MLOperand}}. The tiled N-D tensor.
+ + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/tile()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/tile()}}: +
+ : tile + :: Support limits for operator {{MLGraphBuilder/tile()}}. +
+ +
+ + The tile(|input|, |repetitions|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |repetitions|'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |repetitions|'s values contain 0's, then [=exception/throw=] a {{TypeError}}. + + Issue(391): If 0-size dimensions are allowed, revise these steps. + + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "tile" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ ### transpose ### {#api-mlgraphbuilder-transpose} Permute the dimensions of the input tensor according to {{MLTransposeOptions/permutation}}. @@ -8308,6 +9662,8 @@ The shapes of the input tensors must be compatible. A tensor is [=unidirectional Two tensors are [=bidirectionally broadcastable=] if they can be mutually "stretched" (repeated) across their various dimensions, starting from the last dimension. For example, a *[5,1]* tensor can be bidirectionally broadcast with a *[1,6]* tensor by repeating the first tensor 6 times in the last dimension and the second tensor 5 times in preceding dimension. The result of the operation will be a *[5,6]* tensor. Bidirectional broadcasting is convenient for element-wise operations. +A tensor is [=blockwise broadcastable=] if the all dimensions can be upsampled by integer multiples to the target tensor's shape. For example, a *[4,5]* tensor can be blockwise broadcast up to a *[16,10]* tensor as it is an exact multiple (16 % 4 = 0, 10 % 5 = 0) by repeating every element 4 times in the first dimension and every element 2 times in the last dimension (e.g. values *[1,2,3,4,5]* in a single slice would be repeated to *[1,1,2,2,3,3,4,4,5,5]*). However, a *[4,5]* tensor would be incompatible with a *[9,3]* tensor since both dimensions have a nonzero remainder (9 % 4 = 1, 3 % 5 = 3). Blockwise broadcasting is useful for sharing common values in larger blocks to save memory. Both tensors are expected to have the same rank, and the output shape is simply the target tensor's shape which the smaller one is being upsampled to. + Some operations allow broadcasting with special semantics. For example, {{MLGraphBuilder/matmul()}} treats the last two dimensions of the input tensors as the rows and columns of the matrices, and the number of columns in the first matrix must be equal to the number of rows in the second matrix. 
The matrix multiplication is bidirectionally broadcast across any additional dimensions, treating the input tensors as stacks of matrices to multiply.
@@ -8360,6 +9716,22 @@ To bidirectionally broadcast the sha |shapeA| is bidirectionally broadcastable to |shapeB| if [=bidirectionally broadcasting=] |shapeA| and |shapeB| does not result in failure.

+
+ +To blockwise broadcast the shapes |shapeFrom| and |shapeTo|, perform the following steps. |shapeFrom| and |shapeTo| are [=/lists=] of positive integers, representing the dimensions of tensors, and the steps return a new [=/list=] of positive integers, or failure. + + +1. If |shapeFrom|'s [=list/size=] is not equal to |shapeTo|'s [=list/size=], then return failure. +1. [=list/For each=] |index| in [=the range=] 0 to |shapeTo|'s [=list/size=], exclusive: + 1. If |shapeFrom|[|index|] is not exactly divisible into |shapeTo|[|index|], then return failure. +1. Return |shapeTo|. + +
+ +

+|shapeFrom| is blockwise broadcastable to |shapeTo| if [=blockwise broadcasting=] |shapeFrom| and |shapeTo| does not result in failure. +

+ ## Casting ## {#algorithms-casting} Explicit numeric casting is used in algorithms where parameters passed as {{MLNumber}} or {{double}} need to be converted to match the {{MLOperandDataType}} of input or output {{MLOperand}}s. @@ -8569,8 +9941,8 @@ Operations present in other neural network inference APIs can often be emulated function flatten(builder, input, axis) { if (axis > input.shape.length) return input; - const before = axis.slice(0, axis).reduce((a, b) => a * b); - const after = axis.slice(axis, input.shape.length).reduce((a, b) => a * b); + const before = axis.slice(0, axis).reduce((a, b) => a * b, 1); + const after = axis.slice(axis, input.shape.length).reduce((a, b) => a * b, 1); return builder.reshape(input, [before, after]); } @@ -9127,6 +10499,12 @@ Thanks to Feng Dai for his continuous contributions that keep web-platform-tests "Thomas Scialom" ], "date": "July 2023" + }, + "Prefix-Sum": { + "href": "https://en.wikipedia.org/wiki/Prefix_sum", + "title": "Prefix Sum", + "authors": ["The Wikipedia community"], + "date": "January 2025" } }