From add81ffeedb05725e3aaf960a990bf4326a039af Mon Sep 17 00:00:00 2001 From: Ningxin Hu Date: Thu, 28 Apr 2022 17:14:30 +0800 Subject: [PATCH] Import tfjs webgpu backend build The build is based on 51bf6e2ffe764c92c8356626717f7887aa2d333e --- src/js/third_party/tfjs/tf-backend-webgpu.js | 10337 ++++++++++++++++ .../third_party/tfjs/tf-backend-webgpu.js.map | 1 + 2 files changed, 10338 insertions(+) create mode 100644 src/js/third_party/tfjs/tf-backend-webgpu.js create mode 100644 src/js/third_party/tfjs/tf-backend-webgpu.js.map diff --git a/src/js/third_party/tfjs/tf-backend-webgpu.js b/src/js/third_party/tfjs/tf-backend-webgpu.js new file mode 100644 index 000000000..c1f3679c8 --- /dev/null +++ b/src/js/third_party/tfjs/tf-backend-webgpu.js @@ -0,0 +1,10337 @@ +/** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@tensorflow/tfjs-core')) : + typeof define === 'function' && define.amd ? define(['exports', '@tensorflow/tfjs-core'], factory) : + (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.tf = global.tf || {}, global.tf)); +}(this, (function (exports, tf) { 'use strict'; + + function _interopNamespace(e) { + if (e && e.__esModule) return e; + var n = Object.create(null); + if (e) { + Object.keys(e).forEach(function (k) { + if (k !== 'default') { + var d = Object.getOwnPropertyDescriptor(e, k); + Object.defineProperty(n, k, d.get ? d : { + enumerable: true, + get: function () { + return e[k]; + } + }); + } + }); + } + n['default'] = e; + return n; + } + + var tf__namespace = /*#__PURE__*/_interopNamespace(tf); + + /*! ***************************************************************************** + Copyright (c) Microsoft Corporation. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH + REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, + INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR + OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THIS SOFTWARE. 
+ ***************************************************************************** */ + /* global Reflect, Promise */ + var extendStatics = function (d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) + if (b.hasOwnProperty(p)) + d[p] = b[p]; }; + return extendStatics(d, b); + }; + function __extends(d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + } + function __awaiter(thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { + step(generator.next(value)); + } + catch (e) { + reject(e); + } } + function rejected(value) { try { + step(generator["throw"](value)); + } + catch (e) { + reject(e); + } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); + } + function __generator(thisArg, body) { + var _ = { label: 0, sent: function () { if (t[0] & 1) + throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; + return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () { return this; }), g; + function verb(n) { return function (v) { return step([n, v]); }; } + function step(op) { + if (f) + throw new TypeError("Generator is already executing."); + while (_) + try { + if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) + return t; + if (y = 0, t) + op = [op[0] & 2, t.value]; + switch (op[0]) { + case 0: + case 1: + t = op; + break; + case 4: + _.label++; + return { value: op[1], done: false }; + case 5: + _.label++; + y = op[1]; + op = [0]; + continue; + case 7: + op = _.ops.pop(); + _.trys.pop(); + continue; + default: + if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { + _ = 0; + continue; + } + if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { + _.label = op[1]; + break; + } + if (op[0] === 6 && _.label < t[1]) { + _.label = t[1]; + t = op; + break; + } + if (t && _.label < t[2]) { + _.label = t[2]; + _.ops.push(op); + break; + } + if (t[2]) + _.ops.pop(); + _.trys.pop(); + continue; + } + op = body.call(thisArg, _); + } + catch (e) { + op = [6, e]; + y = 0; + } + finally { + f = t = 0; + } + if (op[0] & 5) + throw op[1]; + return { value: op[0] ? op[1] : void 0, done: true }; + } + } + function __values(o) { + var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0; + if (m) + return m.call(o); + if (o && typeof o.length === "number") + return { + next: function () { + if (o && i >= o.length) + o = void 0; + return { value: o && o[i++], done: !o }; + } + }; + throw new TypeError(s ? "Object is not iterable." 
: "Symbol.iterator is not defined."); + } + function __read(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; + if (!m) + return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) + ar.push(r.value); + } + catch (error) { + e = { error: error }; + } + finally { + try { + if (r && !r.done && (m = i["return"])) + m.call(i); + } + finally { + if (e) + throw e.error; + } + } + return ar; + } + function __spread() { + for (var ar = [], i = 0; i < arguments.length; i++) + ar = ar.concat(__read(arguments[i])); + return ar; + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ENV = tf.env(); + /** The batched dispatching calls size in the device queue. */ + ENV.registerFlag('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE', function () { return 15; }); + /** + * Whether we forward execution to the CPU backend if tensors are small and + * reside on the CPU. + */ + ENV.registerFlag('WEBGPU_CPU_FORWARD', function () { return true; }); + /** + * Thread register block size for matmul kernel. + */ + ENV.registerFlag('WEBGPU_MATMUL_WORK_PER_THREAD', function () { return 4; }); + /** + * Whether to use conv2d_naive which directly implement the conv2d logic rather + * than using a matmul to simulate. + * Note that NCHW is not supported. + */ + ENV.registerFlag('WEBGPU_USE_NAIVE_CONV2D', function () { return false; }); + /** + * Whether to use conv2dTranspose_naive which directly implement the + * conv2dTranspose logic rather than using a matmul to simulate. + */ + ENV.registerFlag('WEBGPU_USE_NAIVE_CONV2D_TRANSPOSE', function () { return false; }); + /** + * Whether we will run im2col as a separate shader for convolution. + * Note that NCHW is not supported. + */ + ENV.registerFlag('WEBGPU_CONV_SEPARATE_IM2COL_SHADER', function () { return false; }); + /** + * Whether we use low power GPU. Otherwise, a high performance GPU will be + * requested. + */ + ENV.registerFlag('WEBGPU_USE_LOW_POWER_GPU', function () { return false; }); + /** + * Threshold for input tensor size that determines whether WebGPU backend will + * delegate computation to CPU. + * + * Default value is 1000. + */ + ENV.registerFlag('WEBGPU_CPU_HANDOFF_SIZE_THRESHOLD', function () { return 1000; }); + /** + * Whether to use a dummy canvas to make profiling tools like PIX work with + * TFJS webgpu backend. + */ + ENV.registerFlag('WEBGPU_USE_PROFILE_TOOL', function () { return false; }); + /** + * Whether to use import API. + */ + ENV.registerFlag('WEBGPU_USE_IMPORT', function () { return false; }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BinaryOpType; + (function (BinaryOpType) { + BinaryOpType[BinaryOpType["MUL"] = 0] = "MUL"; + BinaryOpType[BinaryOpType["ADD"] = 1] = "ADD"; + BinaryOpType[BinaryOpType["SUB"] = 2] = "SUB"; + BinaryOpType[BinaryOpType["DIV"] = 3] = "DIV"; + BinaryOpType[BinaryOpType["EQUAL"] = 4] = "EQUAL"; + BinaryOpType[BinaryOpType["GREATER"] = 5] = "GREATER"; + BinaryOpType[BinaryOpType["GREATER_EQUAL"] = 6] = "GREATER_EQUAL"; + BinaryOpType[BinaryOpType["LESS"] = 7] = "LESS"; + BinaryOpType[BinaryOpType["LESS_EQUAL"] = 8] = "LESS_EQUAL"; + BinaryOpType[BinaryOpType["LOGICAL_AND"] = 9] = "LOGICAL_AND"; + BinaryOpType[BinaryOpType["NOT_EQUAL"] = 10] = "NOT_EQUAL"; + BinaryOpType[BinaryOpType["SQUARED_DIFFERENCE"] = 11] = "SQUARED_DIFFERENCE"; + BinaryOpType[BinaryOpType["INT_DIV"] = 12] = "INT_DIV"; + BinaryOpType[BinaryOpType["POW"] = 13] = "POW"; + BinaryOpType[BinaryOpType["PRELU"] = 14] = "PRELU"; + BinaryOpType[BinaryOpType["MAX"] = 15] = "MAX"; + BinaryOpType[BinaryOpType["MIN"] = 16] = "MIN"; + BinaryOpType[BinaryOpType["COMPLEX_MULTIPLY_REAL"] = 17] = "COMPLEX_MULTIPLY_REAL"; + BinaryOpType[BinaryOpType["COMPLEX_MULTIPLY_IMAG"] = 18] = "COMPLEX_MULTIPLY_IMAG"; + })(BinaryOpType || (BinaryOpType = {})); + var ADD = 'return a + b;'; + // (Ar + Ai)(Br + Bi) = + // ArBr + ArBi + AiBr + AiBi = ArBr - AB + ArBi + AiBr + // Yr = ArBr - AB + // Yi = ArBi + AiBr + var COMPLEX_MULTIPLY_REAL = 'return areal * breal - aimag * bimag;'; + var COMPLEX_MULTIPLY_IMAG = 'return areal * bimag + aimag * breal;'; + var DIV = 'return a / b;'; + var MUL = 'return a * b;'; + var SQUARED_DIFFERENCE = 'return (a - b) * (a - b);'; + var SUB = 'return a - b;'; + var EQUAL = 'return f32(a == b);'; + var EQUAL_VEC4 = 'return vec4(a == b);'; + var GREATER = 'return f32(a > b);'; + var GREATER_VEC4 = 'return vec4(a > b);'; + var GREATER_EQUAL = 'return f32(a >= b);'; + var GREATER_EQUAL_VEC4 = 'return vec4(a >= b);'; + var LESS = 'return f32(a < b);'; + var LESS_VEC4 = 'return vec4(a < b);'; + var LESS_EQUAL = 'return f32(a <= b);'; + var LESS_EQUAL_VEC4 = 'return vec4(a <= b);'; + var LOGICAL_AND = 'return f32(f32(a) >= 1.0 && f32(b) >= 1.0);'; + var LOGICAL_AND_VEC4 = "return (vec4(a >= vec4(1.0)) *\n vec4(b >= vec4(1.0)));"; + var CHECK_NAN_SNIPPET = "\n if (isnan(a)) { return a; }\n if (isnan(b)) { return b; }\n "; + var CHECK_NAN_SNIPPET_VEC4 = "\n if (isNaN.r) {\n resultTemp.r = uniforms.NAN;\n }\n if (isNaN.g) {\n resultTemp.g = uniforms.NAN;\n }\n if (isNaN.b) {\n resultTemp.b = uniforms.NAN;\n }\n if (isNaN.a) {\n resultTemp.a = uniforms.NAN;\n }\n "; + var INT_DIV = "\n let s = sign(a) * sign(b);\n let ia = i32(round(a));\n let ib = i32(round(b));\n return f32(idiv(ia, ib, s));\n "; + var INT_DIV_VEC4 = "\n let ia = vec4(round(a));\n let ib = vec4(round(b));\n let cond = ib != vec4(0);\n var resultTemp = vec4(0);\n let s = sign(a) * sign(b);\n\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n if (cond[0]) {\n resultTemp[0] = idiv(ia[0], ib[0], s[0]);\n }\n if (cond[1]) {\n 
resultTemp[1] = idiv(ia[1], ib[1], s[1]);\n }\n if (cond[2]) {\n resultTemp[2] = idiv(ia[2], ib[2], s[2]);\n }\n if (cond[3]) {\n resultTemp[3] = idiv(ia[3], ib[3], s[3]);\n }\n return vec4(resultTemp);\n "; + var NOT_EQUAL = 'return f32(a != b);'; + var NOT_EQUAL_VEC4 = 'return vec4(a != b);'; + var POW = "\n if(a < 0.0 && floor(b) < b) {\n return uniforms.NAN;\n }\n if (b == 0.0) {\n return 1.0;\n }\n if (round(abs(b) % 2.0) != 1.0) {\n return pow(abs(a), b);\n }\n return sign(a) * pow(abs(a), b);\n "; + var POW_VEC4 = "\n let isModRound1Bool = vec4(round(abs(b) % vec4(2.0))) == vec4(1);\n let isModRound1 = vec4(isModRound1Bool);\n let multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1);\n var resultTemp = multiplier * pow(abs(a), b);\n\n // Ensure that a^0 = 1, including 0^0 = 1 as this correspond to TF and JS\n let isExpZero = b == vec4(0.0);\n if (isExpZero.r) {\n resultTemp.r = 1.0;\n }\n if (isExpZero.g) {\n resultTemp.g = 1.0;\n }\n if (isExpZero.b) {\n resultTemp.b = 1.0;\n }\n if (isExpZero.a) {\n resultTemp.a = 1.0;\n }\n let isNaN = a < vec4(0.0) & floor(b) < b;\n " + CHECK_NAN_SNIPPET_VEC4 + "\n return resultTemp;\n "; + var PRELU = "if (a < 0.0) { return b * a; } return a;"; + var PRELU_VEC4 = "\n let aLessThanZero = vec4(a < vec4(0.0));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n "; + function getMinMaxString(op, useVec4) { + var checkNanSnippet = useVec4 ? CHECK_NAN_SNIPPET_VEC4 : CHECK_NAN_SNIPPET; + return useVec4 ? "\n var resultTemp = vec4(" + op + "(a, b));\n let isNaN = isnanVec4(a) | isnanVec4(b);\n " + checkNanSnippet + + "\n return resultTemp;\n " : + checkNanSnippet + ("\n return " + op + "(a, b);\n "); + } + function getBinaryOpString(type, useVec4) { + switch (type) { + case BinaryOpType.MUL: + return MUL; + case BinaryOpType.ADD: + return ADD; + case BinaryOpType.SUB: + return SUB; + case BinaryOpType.DIV: + return DIV; + case BinaryOpType.EQUAL: + return useVec4 ? EQUAL_VEC4 : EQUAL; + case BinaryOpType.GREATER: + return useVec4 ? GREATER_VEC4 : GREATER; + case BinaryOpType.GREATER_EQUAL: + return useVec4 ? GREATER_EQUAL_VEC4 : GREATER_EQUAL; + case BinaryOpType.LESS: + return useVec4 ? LESS_VEC4 : LESS; + case BinaryOpType.LESS_EQUAL: + return useVec4 ? LESS_EQUAL_VEC4 : LESS_EQUAL; + case BinaryOpType.LOGICAL_AND: + return useVec4 ? LOGICAL_AND_VEC4 : LOGICAL_AND; + case BinaryOpType.NOT_EQUAL: + return useVec4 ? NOT_EQUAL_VEC4 : NOT_EQUAL; + case BinaryOpType.SQUARED_DIFFERENCE: + return SQUARED_DIFFERENCE; + case BinaryOpType.INT_DIV: + return useVec4 ? INT_DIV_VEC4 : INT_DIV; + case BinaryOpType.PRELU: + return useVec4 ? PRELU_VEC4 : PRELU; + case BinaryOpType.MAX: + return getMinMaxString('max', useVec4); + case BinaryOpType.MIN: + return getMinMaxString('min', useVec4); + case BinaryOpType.POW: + return useVec4 ? POW_VEC4 : POW; + case BinaryOpType.COMPLEX_MULTIPLY_REAL: + return COMPLEX_MULTIPLY_REAL; + case BinaryOpType.COMPLEX_MULTIPLY_IMAG: + return COMPLEX_MULTIPLY_IMAG; + default: + throw new Error("BinaryType " + type + " is not implemented!"); + } + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var UnaryOpType; + (function (UnaryOpType) { + UnaryOpType[UnaryOpType["ABS"] = 0] = "ABS"; + UnaryOpType[UnaryOpType["CEIL"] = 1] = "CEIL"; + UnaryOpType[UnaryOpType["COS"] = 2] = "COS"; + UnaryOpType[UnaryOpType["COSH"] = 3] = "COSH"; + UnaryOpType[UnaryOpType["ELU"] = 4] = "ELU"; + UnaryOpType[UnaryOpType["EXP"] = 5] = "EXP"; + UnaryOpType[UnaryOpType["EXPM1"] = 6] = "EXPM1"; + UnaryOpType[UnaryOpType["FLOOR"] = 7] = "FLOOR"; + UnaryOpType[UnaryOpType["LINEAR"] = 8] = "LINEAR"; + UnaryOpType[UnaryOpType["LOG"] = 9] = "LOG"; + UnaryOpType[UnaryOpType["LOGICAL_NOT"] = 10] = "LOGICAL_NOT"; + UnaryOpType[UnaryOpType["NEG"] = 11] = "NEG"; + UnaryOpType[UnaryOpType["RELU"] = 12] = "RELU"; + UnaryOpType[UnaryOpType["RELU6"] = 13] = "RELU6"; + UnaryOpType[UnaryOpType["LEAKYRELU"] = 14] = "LEAKYRELU"; + UnaryOpType[UnaryOpType["RSQRT"] = 15] = "RSQRT"; + UnaryOpType[UnaryOpType["SIN"] = 16] = "SIN"; + UnaryOpType[UnaryOpType["SINH"] = 17] = "SINH"; + UnaryOpType[UnaryOpType["SIGMOID"] = 18] = "SIGMOID"; + UnaryOpType[UnaryOpType["SQRT"] = 19] = "SQRT"; + UnaryOpType[UnaryOpType["SQUARE"] = 20] = "SQUARE"; + UnaryOpType[UnaryOpType["TANH"] = 21] = "TANH"; + UnaryOpType[UnaryOpType["TO_INT"] = 22] = "TO_INT"; + })(UnaryOpType || (UnaryOpType = {})); + var ABS = "return abs(a);"; + var CEIL = "return ceil(a);"; + var COS = "return cos(a);"; + var COSH = "\n let e2x = exp(-a);\n return (e2x + 1.0 / e2x) / 2.0;\n"; + var EXPM1 = "return exp(a) - 1.0;"; + var ELU = "if (a >= 0.0) { return a; } return (exp(a) - 1.0);"; + var ELU_VEC4 = "\n var resFloat = exp(a) - vec4(1.0);\n if (a.r >= 0.0) {\n resFloat.r = a.r;\n }\n if (a.g >= 0.0) {\n resFloat.g = a.g;\n }\n if (a.b >= 0.0) {\n resFloat.b = a.b;\n }\n if (a.a >= 0.0) {\n resFloat.a = a.a;\n }\n return resFloat;\n"; + var EXP = "return exp(a);"; + var FLOOR = "return floor(a);"; + var LINEAR = "return a;"; + var LOG = "if (a < 0.0) { return 1.0/0.0; }\n return log(a);"; + var LOGICAL_NOT = "return f32(!(a >= 1.0));"; + var NEG = "return -a;"; + var LEAKYRELU = "if (a < 0.0) { return uniforms.alpha * a; } return a;"; + var LEAKYRELU_VEC4 = "\n let aLessThanZero = vec4(a < vec4(0.0));\n return (aLessThanZero * (uniforms.alpha * a)) + ((vec4(1.0) - aLessThanZero) * a);\n"; + var RELU = "if(a < 0.0) { return 0.0; } return a;"; + var RELU6 = 'return clamp(a, 0.0, 6.0);'; + var RELU6_VEC4 = 'return clamp(a, vec4(0.0, 0.0, 0.0, 0.0), vec4(6.0, 6.0, 6.0, 6.0));'; + var RELU_VEC4 = "\n var resFloat = a * vec4(a >= vec4(0.0));\n let isNaN = isnanVec4(a);\n\n if (isNaN.r) {\n resFloat.r = a.r;\n }\n if (isNaN.g) {\n resFloat.g = a.g;\n }\n if (isNaN.b) {\n resFloat.b = a.b;\n }\n if (isNaN.a) {\n resFloat.a = a.a;\n }\n return resFloat;\n"; + var RSQRT = "return 1.0/sqrt(a);"; + var SIGMOID = "return 1.0 / (1.0 + exp(-1.0 * a));"; + var SIN = "return sin(a);"; + var SINH = "\n let e2x = exp(a);\n return (e2x - 1.0 / e2x) / 2.0;\n"; + var SQRT = "return sqrt(a);"; + var SQUARE = "return a * a;"; + var TANH = "\n let e2x = exp(-2.0 * abs(a));\n return 
sign(a) * (1.0 - e2x) / (1.0 + e2x);\n"; + var TO_INT = "return f32(i32((a)));"; + function getUnaryOpString(type, useVec4) { + switch (type) { + case UnaryOpType.ABS: + return ABS; + case UnaryOpType.COS: + return COS; + case UnaryOpType.COSH: + return COSH; + case UnaryOpType.CEIL: + return CEIL; + case UnaryOpType.ELU: + return useVec4 ? ELU_VEC4 : ELU; + case UnaryOpType.EXP: + return EXP; + case UnaryOpType.EXPM1: + return EXPM1; + case UnaryOpType.FLOOR: + return FLOOR; + case UnaryOpType.LINEAR: + return LINEAR; + case UnaryOpType.LOG: + return LOG; + case UnaryOpType.LOGICAL_NOT: + return LOGICAL_NOT; + case UnaryOpType.NEG: + return NEG; + case UnaryOpType.LEAKYRELU: + return useVec4 ? LEAKYRELU_VEC4 : LEAKYRELU; + case UnaryOpType.RELU: + return useVec4 ? RELU_VEC4 : RELU; + case UnaryOpType.RELU6: + return useVec4 ? RELU6_VEC4 : RELU6; + case UnaryOpType.RSQRT: + return RSQRT; + case UnaryOpType.SIGMOID: + return SIGMOID; + case UnaryOpType.SIN: + return SIN; + case UnaryOpType.SINH: + return SINH; + case UnaryOpType.SQRT: + return SQRT; + case UnaryOpType.SQUARE: + return SQUARE; + case UnaryOpType.TANH: + return TANH; + case UnaryOpType.TO_INT: + return TO_INT; + default: + throw new Error("BinaryType " + type + " is not implemented!"); + } + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function mapActivationToShaderProgram(activation, packed) { + if (packed === void 0) { packed = false; } + if (activation === null) { + return null; + } + else if (activation === 'linear') { + return getUnaryOpString(UnaryOpType.LINEAR); + } + else if (activation === 'relu') { + return getUnaryOpString(UnaryOpType.RELU, packed); + } + else if (activation === 'elu') { + return getUnaryOpString(UnaryOpType.ELU, packed); + } + else if (activation === 'relu6') { + return getUnaryOpString(UnaryOpType.RELU6, packed); + } + else if (activation === 'prelu') { + return getBinaryOpString(BinaryOpType.PRELU, packed); + } + else if (activation === 'sigmoid') { + return getUnaryOpString(UnaryOpType.SIGMOID, packed); + } + else if (activation === 'leakyrelu') { + return getUnaryOpString(UnaryOpType.LEAKYRELU, packed); + } + throw new Error("Activation " + activation + " has not been implemented for the WebGPU backend."); + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Generates GLSL that computes strides. + function symbolicallyComputeStrides(indicesArr, variableName) { + if (Math.max.apply(Math, __spread(indicesArr)) > 3) { + throw new Error('Cannot symbolically compute strides for rank > 4 tensor.'); + } + var numCoords = indicesArr.length; + var shape = indicesArr.map(function (d) { return variableName + "[" + d + "]"; }); + var strides = new Array(numCoords - 1); + strides[numCoords - 2] = shape[numCoords - 1]; + for (var i = numCoords - 3; i >= 0; --i) { + strides[i] = "(" + strides[i + 1] + " * " + shape[i + 1] + ")"; + } + return strides; + } + + function getCoordsDataType(rank) { + if (rank <= 1) { + return 'i32'; + } + else if (rank === 2) { + return "vec2"; + } + else if (rank === 3) { + return "vec3"; + } + else if (rank === 4) { + return "vec4"; + } + else if (rank === 5) { + return "vec5"; + } + else if (rank === 6) { + return "vec6"; + } + else { + throw Error("GPU for rank " + rank + " is not yet supported"); + } + } + function getCoordsXYZ(index) { + if (index === 0) { + return 'x'; + } + else if (index === 1) { + return 'y'; + } + else if (index === 2) { + return 'z'; + } + else if (index === 3) { + return 'w'; + } + else if (index === 4) { + return 'u'; + } + else if (index === 5) { + return 'v'; + } + else { + throw Error("Index " + index + " is not yet supported"); + } + } + function mapToWgslTypes(type, isVec4) { + if (type === 'float32') { + return isVec4 ? 'vec4' : 'f32'; + } + else if (type === 'int32') { + return isVec4 ? 'vec4' : 'i32'; + } + else if (type === 'bool') { + // Type 'bool' cannot be used in storage class, + // https://www.w3.org/TR/WGSL/#host-shareable-types. + return isVec4 ? 
'vec4' : 'i32'; + } + return type; + } + function getWorkGroupSizeString() { + return "\n @stage(compute) @workgroup_size(workGroupSizeX, workGroupSizeY, workGroupSizeZ)\n"; + } + function getMainHeaderString() { + return "\n " + getWorkGroupSizeString() + "\n fn main(@builtin(local_invocation_id) LocalId : vec3,\n @builtin(global_invocation_id) GlobalId : vec3,\n @builtin(num_workgroups) NumWorkgroups: vec3) {\n localId = LocalId;\n globalId = GlobalId;\n numWorkgroups = NumWorkgroups;\n"; + } + function getMainHeaderAndGlobalIndexString() { + return "\n " + getMainHeaderString() + "\n let index = getGlobalIndex();\n"; + } + function makeShader(inputInfo, outputData, program, isFromPixel) { + if (isFromPixel === void 0) { isFromPixel = false; } + var prefixSnippets = []; + prefixSnippets.push("\n let workGroupSizeX = " + program.workGroupSize[0] + "u;\n let workGroupSizeY = " + program.workGroupSize[1] + "u;\n let workGroupSizeZ = " + program.workGroupSize[2] + "u;\n\n var localId: vec3;\n var globalId: vec3;\n var numWorkgroups: vec3;\n\n // Only used when the y/z dimension of workgroup size is 1.\n fn getGlobalIndex() -> i32 {\n if (numWorkgroups.y == 1u && numWorkgroups.z == 1u) {\n return i32(globalId.x);\n }\n\n let localInvocationIndex = localId.z * workGroupSizeX * workGroupSizeY +\n localId.y * workGroupSizeX + localId.x;\n let workGroupID = (globalId - localId)/vec3(\n workGroupSizeX, workGroupSizeY, workGroupSizeZ);\n\n return i32((workGroupID.z * numWorkgroups.x * numWorkgroups.y +\n workGroupID.y * numWorkgroups.x + workGroupID.x) *\n (workGroupSizeX * workGroupSizeY * workGroupSizeZ) +\n localInvocationIndex);\n }\n "); + if (isFromPixel === true) { + prefixSnippets.push("\n struct Uniform {\n size : i32,\n numChannels : i32,\n outShapeStrides : vec2,\n dispatchSize : vec3,\n };\n\n @group(0) @binding(0) var result: array<" + mapToWgslTypes(outputData.dtype, program.isVec4) + ">;\n @group(0) @binding(2) var uniforms: Uniform;\n "); + return [ + commonSnippet, + prefixSnippets.join('\n'), + getCoordsFromIndexSnippet(outputData.shape), + program.getUserCode(), + ].join('\n'); + } + var preMemberIsStruct = false; + var currentMemberIsStruct = false; + var uniformDeclaration = 'struct Uniforms { NAN : f32, '; + program.variableNames.forEach(function (x, i) { + var perDataType = getCoordsDataType(inputInfo[i].shape.length); + if (perDataType === 'vec5' || perDataType === 'vec6') { + currentMemberIsStruct = true; + } + if (preMemberIsStruct || currentMemberIsStruct) { + uniformDeclaration += "@align(16) "; + } + preMemberIsStruct = currentMemberIsStruct; + uniformDeclaration += + x.charAt(0).toLowerCase() + x.slice(1) + "Shape : " + perDataType + ", "; + }); + var outputDataType = getCoordsDataType(outputData.shape.length); + currentMemberIsStruct = + outputDataType === 'vec5' || outputDataType === 'vec6'; + if (preMemberIsStruct || currentMemberIsStruct) { + uniformDeclaration += "@align(16) "; + } + preMemberIsStruct = currentMemberIsStruct; + uniformDeclaration += "outShape : " + outputDataType + ", "; + var stridesLength = outputData.shape.length - 1; + var stridesDataType = getCoordsDataType(stridesLength); + currentMemberIsStruct = + stridesDataType === 'vec5' || stridesDataType === 'vec6'; + if (preMemberIsStruct || currentMemberIsStruct) { + uniformDeclaration += "@align(16) "; + } + preMemberIsStruct = currentMemberIsStruct; + uniformDeclaration += "\n outShapeStrides: " + stridesDataType + ", "; + if (program.size) { + if (preMemberIsStruct) { + uniformDeclaration += 
"@align(16) "; + } + preMemberIsStruct = false; + uniformDeclaration += 'size : i32, '; + } + if (program.uniforms) { + if (preMemberIsStruct) { + uniformDeclaration += "@align(16) "; + } + uniformDeclaration += program.uniforms; + } + uniformDeclaration += '};'; + prefixSnippets.push(uniformDeclaration); + // Output buffer. + if (program.atomic) { + prefixSnippets.push("\n @group(0) @binding(0) var result: array>;\n "); + } + else { + prefixSnippets.push("\n @group(0) @binding(0) var result: array<" + mapToWgslTypes(outputData.dtype, program.isVec4) + ">;\n "); + } + program.variableNames.forEach(function (x, i) { + prefixSnippets.push("\n @group(0) @binding(" + (1 + i) + ") var " + x + ": array<" + mapToWgslTypes(inputInfo[i].dtype, program.isVec4) + ">;\n "); + }); + if (uniformDeclaration !== '') { + prefixSnippets.push("\n @group(0) @binding(" + (1 + program.variableNames.length) + ") var uniforms: Uniforms;\n "); + } + var _a = __read(getOutputCoordsSnippet(outputData.shape, program.dispatchLayout), 2), coordsSnippet = _a[0], dispatchLayoutRank = _a[1]; + var sources = [ + commonSnippet, prefixSnippets.join('\n'), + getCoordsFromIndexSnippet(outputData.shape), coordsSnippet, + getOutputIndexFromCoordsSnippet(outputData.shape.length) + ]; + if (!program.atomic) { + sources.push(setOutputSnippet(outputData.shape, outputData.dtype, program.isVec4)); + } + if (dispatchLayoutRank === outputData.shape.length) { + // Input snippet is only meaningful when the output isn't getting + // implicitly reshaped (like it does in conv2d_matmul). + var inputSnippet = inputInfo + .map(function (x) { return getInputSnippet(x, outputData.shape, program.isVec4, program.dispatchLayout.x.length === + outputData.shape.length); }) + .join('\n'); + sources.push(inputSnippet); + } + sources.push(program.getUserCode()); + var source = sources.join('\n'); + return source; + } + var commonSnippet = "\n struct vec5 {x: i32, y: i32, z: i32, w: i32, u: i32};\n struct vec6 {x: i32, y: i32, z: i32, w: i32, u: i32, v: i32};\n\n // Checks whether coordinates lie within the bounds of the shape.\n fn coordsInBounds2D(coord : vec2, shape : vec2) -> bool {\n return all(coord >= vec2(0)) && all(coord < shape);\n }\n fn coordsInBounds3D(coord : vec3, shape : vec3) -> bool {\n return all(coord >= vec3(0)) && all(coord < shape);\n }\n fn coordsInBounds4D(coord : vec4, shape : vec4) -> bool {\n return all(coord >= vec4(0)) && all(coord < shape);\n }\n\n fn getIndexFromCoords1D(coord : i32, shape : i32) -> i32 {\n return coord;\n }\n fn getIndexFromCoords2D(coords : vec2, shape : vec2) -> i32 {\n return dot(coords, vec2(shape.y, 1));\n }\n fn getIndexFromCoords3D(coords : vec3, shape : vec3) -> i32 {\n return dot(coords, vec3(shape.y * shape.z, shape.z, 1));\n }\n fn getIndexFromCoords4D(coords : vec4, shape : vec4) -> i32 {\n return dot(coords, vec4(\n shape.y * shape.z * shape.w, shape.z * shape.w, shape.w, 1));\n }\n fn getIndexFromCoords5D(coords : vec5, shape : vec5) -> i32 {\n let shapeStrides: vec5 = vec5(shape.y * shape.z * shape.w * shape.u, shape.z * shape.w * shape.u, shape.w * shape.u, shape.u, 1);\n return coords.x*shapeStrides.x + coords.y*shapeStrides.y + coords.z*shapeStrides.z + coords.w*shapeStrides.w + coords.u*shapeStrides.u;\n }\n fn getIndexFromCoords6D(coords : vec6, shape : vec6) -> i32 {\n let shapeStrides: vec6 = vec6(shape.y * shape.z * shape.w * shape.u * shape.v, shape.z * shape.w * shape.u * shape.v, shape.w * shape.u * shape.v, shape.u * shape.v, shape.v, 1);\n return coords.x*shapeStrides.x + 
coords.y*shapeStrides.y + coords.z*shapeStrides.z + coords.w*shapeStrides.w + coords.u*shapeStrides.u + coords.v*shapeStrides.v;\n }\n\n fn idiv(a: i32, b: i32, sign: f32) -> i32 {\n var res: i32 = a / b;\n let mod: i32 = a % b;\n if (sign < 0. && mod != 0) {\n res = res - 1;\n }\n return res;\n }\n\n // NaN defination in IEEE 754-1985 is :\n // - sign = either 0 or 1.\n // - biased exponent = all 1 bits.\n // - fraction = anything except all 0 bits (since all 0 bits represents infinity).\n // https://en.wikipedia.org/wiki/IEEE_754-1985#Representation_of_non-numbers\n fn isnan(val: f32) -> bool {\n let floatToUint: u32 = bitcast(val);\n return (floatToUint & 0x7fffffffu) > 0x7f800000u;\n }\n fn isnanVec4(val : vec4) -> vec4 {\n return vec4(isnan(val[0]), isnan(val[1]), isnan(val[2]), isnan(val[3]));\n }\n"; + function getOutputIndexFromCoordsSnippet(outRank) { + var snippet = ''; + switch (outRank) { + case 0: + case 1: + snippet += "\n fn getOutputIndexFromCoords(coords : i32) -> i32 {\n return coords;\n }\n "; + break; + case 2: + snippet += "\n fn getOutputIndexFromCoords(coords : vec2) -> i32 {\n return dot(coords, vec2(uniforms.outShapeStrides, 1));\n }\n "; + break; + case 3: + snippet += "\n fn getOutputIndexFromCoords(coords : vec3) -> i32 {\n return dot(coords, vec3(uniforms.outShapeStrides.x, uniforms.outShapeStrides.y, 1));\n }\n "; + break; + case 4: + snippet += "\n fn getOutputIndexFromCoords(coords : vec4) -> i32 {\n return dot(coords, vec4(\n uniforms.outShapeStrides.x, uniforms.outShapeStrides.y, uniforms.outShapeStrides.z, 1));\n }\n "; + break; + case 5: + snippet += "\n fn getOutputIndexFromCoords(coords : vec5) -> i32 {\n return coords.x * uniforms.outShapeStrides.x +\n coords.y * uniforms.outShapeStrides.y +\n coords.z * uniforms.outShapeStrides.z +\n coords.w * uniforms.outShapeStrides.w +\n coords.u;\n }\n "; + break; + case 6: + snippet += "\n fn getOutputIndexFromCoords(coords : vec6) -> i32 {\n return coords.x * uniforms.outShapeStrides.x +\n coords.y * uniforms.outShapeStrides.y +\n coords.z * uniforms.outShapeStrides.z +\n coords.w * uniforms.outShapeStrides.w +\n coords.u * uniforms.outShapeStrides.u +\n coords.v;\n }\n "; + break; + default: + tf.util.assert(false, function () { return "Unsupported " + outRank + "D shape"; }); + break; + } + return snippet; + } + function setOutputSnippet(outShape, outBufferType, isVec4) { + var outRank = outShape.length; + var wgslType = mapToWgslTypes(outBufferType, isVec4); + var snippet; + if (isVec4) { + snippet = "fn setOutputAtIndex(flatIndex : i32, value : vec4) {\n result[flatIndex] = " + wgslType + "(value);\n }\n fn setOutputAtIndexI32(flatIndex : i32, value : vec4) {\n result[flatIndex] = " + wgslType + "(value);\n }"; + } + else { + snippet = "fn setOutputAtIndex(flatIndex : i32, value : f32) {\n result[flatIndex] = " + wgslType + "(value);\n }\n fn setOutputAtIndexI32(flatIndex : i32, value : i32) {\n result[flatIndex] = " + wgslType + "(value);\n }"; + } + if (outRank >= 2) { + var dims = ['d0', 'd1', 'd2', 'd3', 'd4', 'd5'].slice(0, outRank); + var type = getCoordsDataType(outRank); + if (isVec4) { + snippet += "\n fn setOutputAtCoords(" + dims.map(function (d) { return d + " : i32"; }).join(', ') + ", value : vec4) {\n let flatIndex = getOutputIndexFromCoords(" + type + "(" + dims.join(', ') + "));\n setOutputAtIndex(flatIndex / 4, value);\n }\n fn setOutputAtCoordsI32(" + dims.map(function (d) { return d + " : i32"; }).join(', ') + ", value : vec4) {\n let flatIndex = getOutputIndexFromCoords(" + type + "(" 
+ dims.join(', ') + "));\n setOutputAtIndexI32(flatIndex / 4, value);\n }\n "; + } + else { + snippet += "\n fn setOutputAtCoords(" + dims.map(function (d) { return d + " : i32"; }).join(', ') + ", value : f32) {\n let flatIndex = getOutputIndexFromCoords(" + type + "(" + dims.join(', ') + "));\n setOutputAtIndex(flatIndex, value);\n }\n fn setOutputAtCoordsI32(" + dims.map(function (d) { return d + " : i32"; }).join(', ') + ", value : i32) {\n let flatIndex = getOutputIndexFromCoords(" + type + "(" + dims.join(', ') + "));\n setOutputAtIndexI32(flatIndex, value);\n }\n "; + } + } + return snippet; + } + function getInputSnippet(inputInfo, outShape, isVec4, isFlatDispatchLayout) { + var res = getInputAtCoordsSnippet(inputInfo, isVec4); + var inShape = inputInfo.shape; + if (inShape.length <= outShape.length) { + res += getInputByOutputSnippet(inputInfo, outShape, isVec4, isFlatDispatchLayout); + } + return res; + } + function getInputAtCoordsSnippet(inputInfo, isVec4) { + var texName = inputInfo.name; + var rank = inputInfo.shape.length; + var type = getCoordsDataType(rank); + var funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + var dims = ['d0', 'd1', 'd2', 'd3', 'd4', 'd5'].slice(0, rank); + var inputs = dims.map(function (d) { return d + " : i32"; }).join(', '); + if (rank < 1) { + if (isVec4) { + return "\n fn " + funcName + "() -> vec4 {\n return vec4(" + texName + "[0]);\n }\n "; + } + return "\n fn " + funcName + "() ->f32 {\n return f32(" + texName + "[0]);\n }\n "; + } + var shapeStr = "uniforms." + (texName.charAt(0).toLowerCase() + texName.slice(1)) + "Shape"; + var rankStr = rank + "D"; + if (rank === 0) { + rankStr = '1D'; + } + if (isVec4) { + return "\n fn " + funcName + "(" + inputs + ") -> vec4 {\n return vec4(" + texName + "[getIndexFromCoords" + rankStr + "(" + type + "(" + dims.join(',') + "),\n " + shapeStr + ") / 4]);\n }\n "; + } + return "\n fn " + funcName + "(" + inputs + ") -> f32 {\n return f32(" + texName + "[getIndexFromCoords" + rankStr + "(" + type + "(" + dims.join(',') + "),\n " + shapeStr + ")]);\n }\n "; + } + function getInputByOutputSnippet(inputInfo, outShape, isVec4, isFlatDispatchLayout) { + var texName = inputInfo.name; + var texFuncSnippet = texName.charAt(0).toUpperCase() + texName.slice(1); + var funcName = 'get' + texFuncSnippet + 'ByOutput'; + var inRank = inputInfo.shape.length; + var outRank = outShape.length; + var type = getCoordsDataType(outRank); + // If the inShape equals the outShape and the dispatch layout is flat, we can + // directly use |gl_GlobalInvocationID.x| as the index and don't need coords + // conversion between these two shapes. + if (tf.util.arraysEqual(inputInfo.shape, outShape) && isFlatDispatchLayout) { + if (isVec4) { + return "\n fn " + funcName + "Index(globalIndex : i32) -> vec4 {\n return vec4(" + texName + "[globalIndex]);\n }\n\n fn " + funcName + "Coords(coords : " + type + ") -> vec4 {\n return vec4(" + texName + "[" + (outRank > 1 ? 'getOutputIndexFromCoords(coords)' : 'coords') + " / 4]);\n }\n "; + } + else { + return "\n fn " + funcName + "Index(globalIndex : i32) -> f32 {\n return f32(" + texName + "[globalIndex]);\n }\n\n fn " + funcName + "Coords(coords : " + type + ") -> f32 {\n return f32(" + texName + "[" + (outRank > 1 ? 
'getOutputIndexFromCoords(coords)' : 'coords') + "]);\n }\n "; + } + } + var broadcastDims = tf.backend_util.getBroadcastDims(inputInfo.shape, outShape); + var rankDiff = outRank - inRank; + var coordsSnippet = ''; + if (inRank === 0) { + if (isVec4) { + return "\n fn " + funcName + "Index(globalIndex : i32) -> vec4 {\n return get" + texFuncSnippet + "();\n }\n\n fn " + funcName + "Coords(coords : " + type + ") -> vec4 {\n return get" + texFuncSnippet + "();\n }\n "; + } + return "\n fn " + funcName + "Index(globalIndex : i32) -> f32{\n return get" + texFuncSnippet + "();\n }\n\n fn " + funcName + "Coords(coords : " + type + ") -> f32{\n return get" + texFuncSnippet + "();\n }\n "; + } + else { + if (outRank < 2 && broadcastDims.length >= 1) { + coordsSnippet = 'coords = 0;'; + } + else { + coordsSnippet = + broadcastDims.map(function (d) { return "coords." + getCoordsXYZ(d + rankDiff) + " = 0;"; }) + .join('\n'); + } + } + var unpackedCoordsSnippet = ''; + if (outRank < 2 && inRank > 0) { + unpackedCoordsSnippet = 'coords'; + } + else { + if (outRank > 1) { + var coordsType = getCoordsDataType(inRank); + var coordsValues = inputInfo.shape.map(function (s, i) { return "coords." + getCoordsXYZ(i + rankDiff); }) + .join(', '); + unpackedCoordsSnippet = coordsType + "(" + coordsValues + ")"; + } + else { + unpackedCoordsSnippet = 'coords'; + } + } + var shapeStr = "uniforms." + (texName.charAt(0).toLowerCase() + texName.slice(1)) + "Shape"; + var rankStr = inRank + "D"; + if (isVec4) { + return "\n fn " + funcName + "Index(globalIndex : i32) -> vec4 {\n var coords = getCoordsFromIndex(globalIndex);\n " + coordsSnippet + "\n return " + texName + "[getIndexFromCoords" + rankStr + "(" + unpackedCoordsSnippet + ", " + shapeStr + ") / 4];\n }\n\n fn " + funcName + "Coords(coordsIn : " + type + ") -> vec4 {\n var coords = coordsIn;\n " + coordsSnippet + "\n return " + texName + "[getIndexFromCoords" + rankStr + "(" + unpackedCoordsSnippet + ", " + shapeStr + ") / 4];\n }\n "; + } + return "\n fn " + funcName + "Index(globalIndex : i32) -> f32 {\n var coords = getCoordsFromIndex(globalIndex);\n " + coordsSnippet + "\n return f32(" + texName + "[getIndexFromCoords" + rankStr + "(" + unpackedCoordsSnippet + ", " + shapeStr + ")]);\n }\n\n fn " + funcName + "Coords(coordsIn : " + type + ") -> f32 {\n var coords = coordsIn;\n " + coordsSnippet + "\n return f32(" + texName + "[getIndexFromCoords" + rankStr + "(" + unpackedCoordsSnippet + ", " + shapeStr + ")]);\n }\n "; + } + /** + * Generates getOutputCoords() function that computes output coordinates from + * dispatch geometry to reduce arithmetic. + */ + function getOutputCoordsSnippet(outShape, dispatchLayout) { + var x = dispatchLayout.x, _a = dispatchLayout.y, y = _a === void 0 ? [] : _a, _b = dispatchLayout.z, z = _b === void 0 ? 
[] : _b; + var outRank = outShape.length; + if (x.length === outRank) { + var dtype_1 = getCoordsDataType(outRank); + var snippet_1 = "fn getOutputCoords() -> " + dtype_1 + "{\n let globalIndex = getGlobalIndex();\n return getCoordsFromIndex(globalIndex);\n }\n "; + return [snippet_1, outRank]; + } + var gatherDimensionsStr = ''; + var dims = [x, y, z]; + var rank = 0; + for (var i = 0; i < dims.length; i++) { + var arr = dims[i]; + if (arr.length === 0) { + continue; + } + rank += arr.length; + if (arr.length === 1) { + gatherDimensionsStr += "let d" + arr[0] + " = i32(globalId[" + i + "]);"; + } + else { + var strides = symbolicallyComputeStrides(arr, 'uniforms.outShape'); + gatherDimensionsStr += "var index" + i + " = i32(globalId[" + i + "]);"; + for (var j = 0; j < strides.length; j++) { + gatherDimensionsStr += "let d" + arr[j] + " = index" + i + " / " + strides[j] + ";"; + if (j === strides.length - 1) { + gatherDimensionsStr += "let d" + arr[j + 1] + " = " + + ("index" + i + " - d" + arr[j] + " * " + strides[j] + ";"); + } + else { + gatherDimensionsStr += + "index" + i + " = index" + i + " - d" + arr[j] + " * " + strides[j] + ";"; + } + } + } + } + var dimensions = []; + for (var i = 0; i < rank; i++) { + dimensions.push("d" + i); + } + var dtype = getCoordsDataType(rank); + var snippet = "fn getOutputCoords() -> " + dtype + " {\n " + gatherDimensionsStr + "\n "; + if (dimensions.length === 0) { + snippet += "return " + dtype + "(0); }"; + } + else { + snippet += "return " + dtype + "(" + dimensions.join(',') + "); }"; + } + return [snippet, rank]; + } + /** + * Derives logical coordinates from a flat index. Performs integer division + * with each stride and decrements the index until the index equals the final + * dimension coordinate. + */ + function getCoordsFromIndexSnippet(shape) { + var rank = shape.length; + if (rank <= 1) { + return "fn getCoordsFromIndex(index : i32) -> i32 { return index; }"; + } + var strides = tf.util.computeStrides(shape); + var dtype = getCoordsDataType(rank); + var coords = []; + for (var i = 0; i < rank; i++) { + coords.push("d" + i); + } + if (strides.length === 1) { + return " fn getCoordsFromIndex(index : i32) -> vec2 {\n let d0 = index / uniforms.outShapeStrides; let d1 = index - d0 * uniforms.outShapeStrides;\n return vec2(d0, d1);\n }"; + } + var snippet; + snippet = 'var index2 = index;' + + strides + .map(function (_, i) { + var line1 = "let " + coords[i] + " = index2 / uniforms.outShapeStrides." + getCoordsXYZ(i); + var line2 = i === strides.length - 1 ? + "let " + coords[i + 1] + " = index2 - " + coords[i] + " * uniforms.outShapeStrides." + getCoordsXYZ(i) : + "index2 = index2 - " + coords[i] + " * uniforms.outShapeStrides." + getCoordsXYZ(i); + return line1 + "; " + line2 + ";"; + }) + .join(''); + return "\n fn getCoordsFromIndex(index : i32) -> " + dtype + " {\n " + snippet + "\n return " + dtype + "(" + coords.join(',') + ");\n }\n "; + } + + var arrayProduct = function (arr) { + var product = 1; + for (var i = 0; i < arr.length; i++) { + product *= arr[i]; + } + return product; + }; + function tilesFitEvenlyIntoShape(tileSize, shape) { + if (tileSize.length !== shape.length) { + throw new Error("Cannot compute whether rank " + tileSize.length + + (" tiles fit evenly into rank " + shape.length + " shape") + + " - ranks must match."); + } + return shape.every(function (dim, dimIdx) { return dim % tileSize[dimIdx] === 0; }); + } + // Computes dispatch geometry based on layout of output dimensions and + // workGroupSize. 
+ function computeDispatch(layout, outputShape, workGroupSize, elementsPerThread) { + if (workGroupSize === void 0) { workGroupSize = [1, 1, 1]; } + if (elementsPerThread === void 0) { elementsPerThread = [1, 1, 1]; } + var _a = __read([ + Math.ceil(arrayProduct(layout.x.map(function (d) { return outputShape[d]; })) / + (workGroupSize[0] * elementsPerThread[0])), + layout.y ? Math.ceil(arrayProduct(layout.y.map(function (d) { return outputShape[d]; })) / + (workGroupSize[1] * elementsPerThread[1])) : + 1, + layout.z ? Math.ceil(arrayProduct(layout.z.map(function (d) { return outputShape[d]; })) / + (workGroupSize[2] * elementsPerThread[2])) : + 1 + ], 3), dispatchX = _a[0], dispatchY = _a[1], dispatchZ = _a[2]; + return [dispatchX, dispatchY, dispatchZ]; + } + function computeWorkGroupSizeForConv2d(layout, outputShape) { + var dim0 = arrayProduct(layout.x.map(function (d) { return outputShape[d]; })); + var dim1 = arrayProduct(layout.y.map(function (d) { return outputShape[d]; })); + // TODO(jiajia.qin@intel.com): More fine tune based on outputShape. + // These are experimental values. Usually, we need to adjust the work group + // size based on the output shape. For example, when one dimension is smaller + // than 4, it will be wasteful if we assign a larger size for this dimension, + // which results lots of threads doing useless work and reduces parallelism + // of hardware threads. But it is always a balance between work group size + // and shared memory. If one dimension is too small, such as 1, shared memory + // will won't be fully utilized. + if (dim0 <= 4) { + return [4, 16, 1]; + } + if (dim1 <= 4) { + return [16, 4, 1]; + } + return [16, 16, 1]; + } + function computeWorkGroupSizeForMatMul(dimAOuter, dimInner, dimBOuter) { + // These are experimental values. Usually, we need to adjust the work group + // size based on the input shapes to improve the EU occupancy. + // TODO: WebGPU limits the maximum allowed shared memory size as 16K. To make + // sure it doesn't exceed this limitations. Temporarily reduce the work group + // size to [8, 8, 1] and the work per thread size is [4, 4, 1]. But we should + // revisit it and find the balance between work group size and work per thread + // size. + if (dimAOuter === 1) { + return [32, 1, 1]; + } + else if (dimBOuter === 1) { + return [1, 32, 1]; + } + return [8, 8, 1]; + } + function computeWorkPerThreadForConv2d(layout, outputShape) { + var dim0 = arrayProduct(layout.x.map(function (d) { return outputShape[d]; })); + var dim1 = arrayProduct(layout.y.map(function (d) { return outputShape[d]; })); + // TODO(jiajia.qin@intel.com): More fine tune based on outputShape. + // The following conditions correspond to the values set in + // computeWorkGroupSizeForConv2d. 
+ if (dim0 <= 4) { + return [1, 2, 1]; + } + if (dim1 <= 4) { + return [2, 1, 1]; + } + return [2, 2, 1]; + } + function flatDispatchLayout(shape) { + return { x: shape.map(function (d, i) { return i; }) }; + } + function GPUBytesPerElement(dtype) { + if (dtype === 'float32' || dtype === 'int32' || dtype === 'bool' || + dtype === 'string') { + return 4; + } + else if (dtype === 'complex64') { + return 8; + } + else { + throw new Error("Unknown dtype " + dtype); + } + } + function ArrayBufferToTypedArray(data, dtype) { + if (dtype === 'float32') { + return new Float32Array(data); + } + else if (dtype === 'int32') { + return new Int32Array(data); + } + else if (dtype === 'bool' || dtype === 'string') { + return Uint8Array.from(new Int32Array(data)); + } + else { + throw new Error("Unknown dtype " + dtype); + } + } + function isWebGPUSupported() { + return ((typeof window !== 'undefined') || + //@ts-ignore + (typeof WorkerGlobalScope !== 'undefined')) && !!navigator.gpu; + } + + var webgpu_util = { + __proto__: null, + tilesFitEvenlyIntoShape: tilesFitEvenlyIntoShape, + computeDispatch: computeDispatch, + computeWorkGroupSizeForConv2d: computeWorkGroupSizeForConv2d, + computeWorkGroupSizeForMatMul: computeWorkGroupSizeForMatMul, + computeWorkPerThreadForConv2d: computeWorkPerThreadForConv2d, + flatDispatchLayout: flatDispatchLayout, + GPUBytesPerElement: GPUBytesPerElement, + ArrayBufferToTypedArray: ArrayBufferToTypedArray, + isWebGPUSupported: isWebGPUSupported + }; + + function makeMatMulPackedVec4Source(workPerThread, tileAOuter, tileBOuter, tileInner) { + tf.util.assert(tileInner % 4 === 0 && workPerThread[0] === 4, function () { return 'tileInner must be divisible by 4. And ColPerThread must be 4'; }); + return "\n var mm_Asub : array, " + tileInner / workPerThread[0] + ">, " + tileAOuter + ">;\n var mm_Bsub : array, " + tileBOuter / workPerThread[0] + ">, " + tileInner + ">;\n\n let RowPerThread = " + workPerThread[1] + ";\n let ColPerThread = " + workPerThread[0] + ";\n let TileInner = " + tileInner + ";\n\n " + getMainHeaderString() + "\n\n let tileRow = " + (tileAOuter === 1 ? '0' : 'i32(localId.y) * RowPerThread') + ";\n let tileCol = i32(localId.x);\n\n let globalRow = " + (tileAOuter === 1 ? 
'0' : 'i32(globalId.y) * RowPerThread') + ";\n let globalCol = i32(globalId.x);\n let numTiles = (uniforms.dimInner - 1) / TileInner + 1;\n\n var acc: array, RowPerThread>;\n var ACached : vec4;\n var BCached : array, 4>;\n\n // Loop over shared dimension.\n var globalColA = tileCol;\n let RowPerThreadB = TileInner / i32(workGroupSizeY);\n let tileRowB = i32(localId.y) * RowPerThreadB;\n for (var t = 0; t < numTiles; t = t + 1) {\n // Load one tile of A into local memory.\n for (var innerRow = 0; innerRow < RowPerThread; innerRow = innerRow + 1) {\n let inputRow = tileRow + innerRow;\n let inputCol = tileCol;\n mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, globalColA, globalId);\n }\n globalColA = globalColA + TileInner / ColPerThread;\n\n // Load one tile of B into local memory.\n for (var innerRow = 0; innerRow < RowPerThreadB; innerRow = innerRow + 1) {\n let inputRow = tileRowB + innerRow;\n let inputCol = tileCol;\n mm_Bsub[inputRow][inputCol] = mm_readB(t * TileInner + inputRow, globalCol, globalId);\n }\n\n workgroupBarrier();\n\n // Compute acc values for a single thread.\n for (var k = 0; k < TileInner / ColPerThread; k = k + 1) {\n BCached[0] = mm_Bsub[k * ColPerThread][tileCol];\n BCached[1] = mm_Bsub[k * ColPerThread + 1][tileCol];\n BCached[2] = mm_Bsub[k * ColPerThread + 2][tileCol];\n BCached[3] = mm_Bsub[k * ColPerThread + 3][tileCol];\n\n for (var i = 0; i < RowPerThread; i = i + 1) {\n ACached = mm_Asub[tileRow + i][k];\n acc[i] = BCached[0] * ACached.x + acc[i];\n acc[i] = BCached[1] * ACached.y + acc[i];\n acc[i] = BCached[2] * ACached.z + acc[i];\n acc[i] = BCached[3] * ACached.w + acc[i];\n }\n }\n\n workgroupBarrier();\n }\n\n for (var innerRow = 0; innerRow < RowPerThread; innerRow = innerRow + 1) {\n mm_write(globalRow + innerRow,\n globalCol,\n acc[innerRow], globalId);\n }\n }"; + } + var MatMulPackedVec4Program = /** @class */ (function () { + function MatMulPackedVec4Program(aShape, outputShape, rowPerThread, batchAEqualOne, batchBEqualOne, bias, activation, preluActivationWeights) { + var _a; + if (bias === void 0) { bias = null; } + if (activation === void 0) { activation = null; } + if (preluActivationWeights === void 0) { preluActivationWeights = null; } + this.variableNames = ['A', 'B']; + this.uniforms = "dimAOuter : i32, dimBOuter : i32, dimInner : i32,"; + this.workGroupSize = [8, 8, 1]; + this.isVec4 = true; + this.outputShape = outputShape; + this.dispatchLayout = { x: [2], y: [1], z: [0] }; + // The first element in elementsPerThread must be 4. + if (outputShape[1] === 1) { + this.elementsPerThread = [4, 1, 1]; + } + else { + this.elementsPerThread = [4, 4, 1]; + } + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, this.elementsPerThread); + var addBias = bias != null; + var hasPreluActivationWeights = preluActivationWeights != null; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + this.tileAOuter = outputShape[1] === 1 ? 
+ 1 : + this.workGroupSize[1] * this.elementsPerThread[1]; + this.tileBOuter = this.workGroupSize[0] * this.elementsPerThread[0]; + this.tileInner = this.tileBOuter; + this.aShape = aShape; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + this.batchAEqualOne = batchAEqualOne; + this.batchBEqualOne = batchBEqualOne; + _a = __read(this.getShapeFit(), 2), this.fitA = _a[0], this.fitB = _a[1]; + this.shaderKey = "matMulPackedVec4_" + this.activation + "_" + this.fitA + "_" + this.fitB + "_" + this.elementsPerThread + "_" + this.batchAEqualOne + "_" + this.batchBEqualOne; + } + MatMulPackedVec4Program.prototype.getShapeFit = function () { + var dimInner = this.aShape[2]; + var dimBOuter = this.outputShape[2]; + var bShape = [this.outputShape[0], dimInner, dimBOuter]; + var tileSizeA = [this.tileAOuter, this.tileInner]; + var tileSizeB = [this.tileInner, this.tileBOuter]; + return [ + tilesFitEvenlyIntoShape(tileSizeA, this.aShape.slice(1)), + tilesFitEvenlyIntoShape(tileSizeB, bShape.slice(1)) + ]; + }; + MatMulPackedVec4Program.prototype.getUserCode = function () { + var sampleA = this.fitA ? + "return A[batch * batchASize + row * uniforms.dimInner / 4 + col]" : + "if (coordsInBounds2D(vec2(row, col * 4), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch * batchASize + row * uniforms.dimInner / 4 + col];\n }\n return vec4(0.0)"; + var sampleB = this.fitB ? + "return B[batch * batchBSize + row * uniforms.dimBOuter / 4 + col]" : + "if(coordsInBounds2D(vec2(row, col * 4), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + row * uniforms.dimBOuter / 4 + col];\n }\n return vec4(0.0)"; + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, this.isVec4); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a : vec4, outCoord : vec3) -> vec4 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : vec4, outCoord : vec3) -> vec4 {\n " + activationOp + "\n }"; + } + applyActivationSnippet = 'value = activation(value, outCoord);'; + } + var addBiasSnippet = this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> vec4 {\n " + (this.batchAEqualOne ? "\n let batchASize = 0;\n let batch = 0;\n " : + "\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2] / 4;\n let batch = i32(globalId.z);\n ") + "\n\n " + sampleA + ";\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> vec4 {\n " + (this.batchBEqualOne ? 
"\n let batchBSize = 0;\n let batch = 0;\n " : + "\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2] / 4;\n let batch = i32(globalId.z);\n ") + "\n " + sampleB + ";\n }\n\n fn mm_write(row : i32, col : i32, valueIn : vec4, globalId : vec3) {\n if (row < uniforms.aShape[1] && col * 4 < uniforms.bShape[2])\n {\n var value = valueIn;\n let batch = i32(globalId.z);\n let outCoord = vec3(batch, row, col * 4);\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(outCoord[0], outCoord[1], outCoord[2], value);\n }\n }\n " + makeMatMulPackedVec4Source(this.elementsPerThread, this.tileAOuter, this.tileBOuter, this.tileInner) + "\n "; + return userCode; + }; + return MatMulPackedVec4Program; + }()); + + function makeMatMulPackedSource(workPerThread, workGroupSize) { + var tileAOuter = workGroupSize[1] * workPerThread[1]; + var tileBOuter = workGroupSize[0] * workPerThread[0]; + var tileInner = tileAOuter > tileBOuter ? tileAOuter : tileBOuter; + return "\n var mm_Asub : array, " + tileAOuter + ">;\n var mm_Bsub : array, " + tileInner + ">;\n " + getMainHeaderString() + "\n let tileRow = i32(localId.y) * " + workPerThread[1] + ";\n let tileCol = i32(localId.x) * " + workPerThread[0] + ";\n\n let globalRow = i32(globalId.y) * " + workPerThread[1] + ";\n let globalCol = i32(globalId.x) * " + workPerThread[0] + ";\n\n let numTiles = (uniforms.dimInner - 1) / " + tileInner + " + 1;\n\n var acc : array, " + workPerThread[1] + ">;\n var ACached : f32;\n var BCached : array;\n\n // Without this initialization strange values show up in acc.\n for (var innerRow = 0; innerRow < " + workPerThread[1] + "; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < " + workPerThread[0] + "; innerCol = innerCol + 1) {\n acc[innerRow][innerCol] = 0.0;\n }\n }\n\n let ColPerThreadA = " + tileInner + " / " + workGroupSize[0] + ";\n let tileColA = i32(localId.x) * ColPerThreadA;\n let RowPerThreadB = " + tileInner + " / " + workGroupSize[1] + ";\n let tileRowB = i32(localId.y) * RowPerThreadB;\n\n // Loop over shared dimension.\n for (var t = 0; t < numTiles; t = t + 1) {\n // Load one tile of A into local memory.\n for (var innerRow = 0; innerRow < " + workPerThread[1] + "; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < ColPerThreadA; innerCol = innerCol + 1) {\n let inputRow = tileRow + innerRow;\n let inputCol = tileColA + innerCol;\n\n mm_Asub[inputRow][inputCol] = mm_readA(\n globalRow + innerRow,\n t * " + tileInner + " + inputCol, globalId);\n }\n }\n // Load one tile of B into local memory.\n for (var innerRow = 0; innerRow < RowPerThreadB; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < " + workPerThread[0] + "; innerCol = innerCol + 1) {\n let inputRow = tileRowB + innerRow;\n let inputCol = tileCol + innerCol;\n\n mm_Bsub[inputRow][inputCol] = mm_readB(\n t * " + tileInner + " + inputRow,\n globalCol + innerCol, globalId);\n }\n }\n\n workgroupBarrier();\n\n // Compute acc values for a single thread.\n for (var k = 0; k < " + tileInner + "; k = k + 1) {\n for (var inner = 0; inner < " + workPerThread[0] + "; inner = inner + 1) {\n BCached[inner] = mm_Bsub[k][tileCol + inner];\n }\n\n for (var innerRow = 0; innerRow < " + workPerThread[1] + "; innerRow = innerRow + 1) {\n ACached = mm_Asub[tileRow + innerRow][k];\n for (var innerCol = 0; innerCol < " + workPerThread[0] + "; innerCol = innerCol + 1) {\n acc[innerRow][innerCol] = acc[innerRow][innerCol] + ACached * BCached[innerCol];\n }\n }\n }\n\n workgroupBarrier();\n }\n\n for (var innerRow 
= 0; innerRow < " + workPerThread[1] + "; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < " + workPerThread[0] + "; innerCol = innerCol + 1) {\n\n if ((globalCol + innerCol) < uniforms.dimBOuter &&\n (globalRow + innerRow) < uniforms.dimAOuter) {\n mm_write(globalRow + innerRow,\n globalCol + innerCol,\n acc[innerRow][innerCol], globalId);\n }\n }\n }\n }\n "; + } + function makeMatMulVectorSource(workGroupSize) { + return "\n let TileSize = " + workGroupSize[0] * 4 + ";\n var mm_Asub : array, " + workGroupSize[0] + ">;\n\n " + getMainHeaderString() + "\n let tileCol = i32(localId.x);\n let globalCol = i32(globalId.x);\n let globalRow = i32(globalId.y);\n\n let numTiles = (uniforms.dimInner - 1) / TileSize + 1;\n\n // Without this initialization strange values show up in acc.\n var acc = 0.0;\n\n // Loop over shared dimension.\n for (var t = 0; t < numTiles; t = t + 1) {\n // Load one tile of A into local memory.\n let colA = t * TileSize + tileCol * 4;\n mm_Asub[tileCol] = vec4(mm_readA(globalRow, colA, globalId),\n mm_readA(globalRow, colA + 1, globalId),\n mm_readA(globalRow, colA + 2, globalId),\n mm_readA(globalRow, colA + 3, globalId));\n workgroupBarrier();\n\n // Compute acc values for a single thread.\n for (var k = 0; k < TileSize / 4; k = k + 1) {\n let rowB = t * TileSize + k * 4;\n let BCached = vec4(mm_readB(rowB, globalCol, globalId),\n mm_readB(rowB + 1, globalCol, globalId),\n mm_readB(rowB + 2, globalCol, globalId),\n mm_readB(rowB + 3, globalCol, globalId));\n\n let ACached = mm_Asub[k];\n acc = acc + dot(ACached, BCached);\n }\n\n workgroupBarrier();\n }\n\n if (globalRow < uniforms.dimAOuter && globalCol < uniforms.dimBOuter) {\n mm_write(globalRow, globalCol, acc, globalId);\n }\n }\n "; + } + var MatMulPackedProgram = /** @class */ (function () { + function MatMulPackedProgram(aShape, outputShape, workPerThread, batchAEqualOne, batchBEqualOne, transposeA, transposeB, bias, activation, preluActivationWeights) { + var _a; + if (transposeA === void 0) { transposeA = false; } + if (transposeB === void 0) { transposeB = false; } + if (bias === void 0) { bias = null; } + if (activation === void 0) { activation = null; } + if (preluActivationWeights === void 0) { preluActivationWeights = null; } + this.variableNames = ['A', 'B']; + this.uniforms = "dimAOuter : i32, dimBOuter : i32, dimInner : i32,"; + this.workGroupSize = [16, 16, 1]; + this.outputShape = outputShape; + this.dispatchLayout = { x: [2], y: [1], z: [0] }; + var dimInner = transposeA ? aShape[1] : aShape[2]; + this.workGroupSize = + computeWorkGroupSizeForMatMul(outputShape[1], dimInner, outputShape[2]); + if (outputShape[1] === 1 || outputShape[2] === 1) { + workPerThread = 1; + } + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [workPerThread, workPerThread, 1]); + // If dispaching number is one, it means only one work group is running. + // For modern GPUs, it supports multiple work groups running in parallel. + // So there may be some idle hardware threads. 
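+ // (this.dispatch above is derived from the output shape, the work group size
+ // and workPerThread, so [1, 1, 1] means a single work group already spans the
+ // whole output.)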
+ // In this case, we prefer to reduce the work per thread and improve the + // thread utilization + if (tf.util.arraysEqual(this.dispatch, [1, 1, 1])) { + workPerThread = 1; + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [workPerThread, workPerThread, 1]); + } + var addBias = bias != null; + var hasPreluActivationWeights = preluActivationWeights != null; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + this.workPerThread = workPerThread; + this.aShape = aShape; + this.transposeA = transposeA; + this.transposeB = transposeB; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + this.batchAEqualOne = batchAEqualOne; + this.batchBEqualOne = batchBEqualOne; + var dimBOuter = this.outputShape[2]; + var bShape = this.transposeB ? + [this.outputShape[0], dimBOuter, dimInner] : + [this.outputShape[0], dimInner, dimBOuter]; + _a = __read(this.getShapeFit(bShape), 2), this.fitA = _a[0], this.fitB = _a[1]; + this.shaderKey = "matMulPacked_" + this.workPerThread + "_" + transposeA + "_" + transposeB + "_" + this.activation + "_" + this.fitA + "_" + this.fitB + "_" + (this.outputShape[1] > 1) + "_" + this.batchAEqualOne + "_" + this.batchBEqualOne; + } + MatMulPackedProgram.prototype.getShapeFit = function (bShape) { + var tileAOuter = this.workGroupSize[1] * this.workPerThread; + var tileBOuter = this.workGroupSize[0] * this.workPerThread; + var tileInner = tileAOuter > tileBOuter ? tileAOuter : tileBOuter; + if (this.outputShape[1] === 1) { + tileInner *= 4; + } + tf.util.assert(tileInner % this.workGroupSize[0] === 0 && + tileInner % this.workGroupSize[1] === 0, function () { return "tileInner must be multiple of workgroupsize.x " + + "and workgroupsize.y"; }); + var tileSizeA = [tileAOuter, tileInner]; + var tileSizeB = [tileInner, tileBOuter]; + return [ + tilesFitEvenlyIntoShape(tileSizeA, this.aShape.slice(1)), + tilesFitEvenlyIntoShape(tileSizeB, bShape.slice(1)) + ]; + }; + MatMulPackedProgram.prototype.getUserCode = function () { + var sampleA; + if (this.transposeA === false) { + sampleA = this.fitA ? + "return A[batch * batchASize + row * uniforms.dimInner + col];" : + "if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch * batchASize + row * uniforms.dimInner + col];\n }\n return 0.0;"; + } + else { + sampleA = this.fitA ? + "return A[batch * batchASize + col * uniforms.dimAOuter + row];" : + "if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch* batchASize + col * uniforms.dimAOuter + row];\n }\n return 0.0;"; + } + var sampleB; + if (this.transposeB === false) { + sampleB = this.fitB ? + "return B[batch * batchBSize + row * uniforms.dimBOuter + col];" : + "if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + row * uniforms.dimBOuter + col];\n }\n return 0.0;"; + } + else { + sampleB = this.fitB ? 
+ "return B[batch * batchBSize + col * uniforms.dimInner + row];" : + "if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + col * uniforms.dimInner + row];\n }\n return 0.0;"; + } + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, false); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a : f32, outCoord : vec3) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : f32, outCoord : vec3) -> f32 {\n " + activationOp + "\n }\n "; + } + applyActivationSnippet = 'value = activation(value, outCoord);'; + } + var addBiasSnippet = this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n " + (this.batchAEqualOne ? "\n let batch = 0;\n let batchASize = 0;\n " : + "\n let batch = i32(globalId.z);\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2];\n ") + "\n " + sampleA + "\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n " + (this.batchBEqualOne ? "\n let batch = 0;\n let batchBSize = 0;\n " : + "\n let batch = i32(globalId.z);\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2];\n ") + "\n " + sampleB + "\n }\n\n fn mm_write(row : i32, col : i32, valueIn : f32, globalId : vec3) {\n var value = valueIn;\n let batch = i32(globalId.z);\n let outCoord = vec3(batch, row, col);\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(batch, row, col, value);\n }\n " + (this.outputShape[1] > 1 ? + makeMatMulPackedSource([this.workPerThread, this.workPerThread, 1], this.workGroupSize) : + makeMatMulVectorSource(this.workGroupSize)) + "\n "; + return userCode; + }; + return MatMulPackedProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function makeMatMulReduceSource() { + return "\n var sumValues : array;\n " + getMainHeaderString() + "\n let coords = getOutputCoords();\n let batch = coords[0];\n let row = coords[1];\n let col = coords[2];\n var sum = 0.0;\n let Length = uniforms.dimInner;\n for (var k = i32(localId.x); k < Length; k = k + i32(workGroupSizeX)) {\n let dataA = mm_readA(batch, row, k);\n let dataB = mm_readB(batch, k, col);\n sum = sum + dataA * dataB;\n }\n sumValues[localId.x] = sum;\n workgroupBarrier();\n\n for(var currentSize = workGroupSizeX / 2u; currentSize > 1u;\n currentSize = currentSize / 2u) {\n if (localId.x < currentSize)\n {\n sumValues[localId.x] = sumValues[localId.x] + sumValues[localId.x + currentSize];\n }\n workgroupBarrier();\n }\n\n if (localId.x == 0u) {\n sum = sumValues[0] + sumValues[1];\n mm_write(batch, row, col, sum);\n }\n }\n "; + } + var MatMulReduceProgram = /** @class */ (function () { + function MatMulReduceProgram(outputShape, batchAEqualOne, batchBEqualOne, transposeA, transposeB, bias, activation, preluActivationWeights) { + if (transposeA === void 0) { transposeA = false; } + if (transposeB === void 0) { transposeB = false; } + if (bias === void 0) { bias = null; } + if (activation === void 0) { activation = null; } + if (preluActivationWeights === void 0) { preluActivationWeights = null; } + this.variableNames = ['A', 'B']; + this.uniforms = "dimAOuter : i32, dimBOuter : i32, dimInner : i32,"; + this.workGroupSize = [256, 1, 1]; + this.outputShape = outputShape; + this.dispatchLayout = { x: [], y: [1, 2], z: [0] }; + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + var addBias = bias != null; + var hasPreluActivationWeights = preluActivationWeights != null; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + this.transposeA = transposeA; + this.transposeB = transposeB; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + this.batchAEqualOne = batchAEqualOne; + this.batchBEqualOne = batchBEqualOne; + this.shaderKey = "matMulReduce_" + this.activation + "_" + transposeA + "_" + transposeB + "_" + this.batchAEqualOne + "_" + this.batchBEqualOne; + } + MatMulReduceProgram.prototype.getUserCode = function () { + var sampleA; + if (this.transposeA === false) { + sampleA = + "return f32(A[batch * batchASize + row * uniforms.dimInner + col]);"; + } + else { + sampleA = + "return f32(A[batch * batchASize + col * uniforms.dimAOuter + row]);"; + } + var sampleB; + if (this.transposeB === false) { + sampleB = + "return f32(B[batch * batchBSize + row * uniforms.dimBOuter + col]);"; + } + else { + sampleB = + "return f32(B[batch * batchBSize + col * uniforms.dimInner + row]);"; + } + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, false); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a : f32, outCoord : vec3) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : f32, outCoord : vec3) -> f32 {\n " + activationOp + "\n }\n "; + } + applyActivationSnippet = 'value = activation(value, outCoord);'; + } + var addBiasSnippet = this.addBias ? 
'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n\n fn mm_readA(batchIn: i32, row : i32, col : i32) -> f32 {\n " + (this.batchAEqualOne ? "\n let batchASize = 0;\n let batch = 0;\n " : + "\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2];\n let batch = batchIn;\n ") + "\n " + sampleA + "\n }\n\n fn mm_readB(batchIn: i32, row : i32, col : i32) -> f32 {\n " + (this.batchBEqualOne ? "\n let batch = 0;\n let batchBSize = 0;\n " : + "\n let batch = batchIn;\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2];\n ") + "\n " + sampleB + "\n }\n\n fn mm_write(batch: i32, row : i32, col : i32, valueIn : f32) {\n var value = valueIn;\n let outCoord = vec3(batch, row, col);\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(batch, row, col, value);\n }\n " + makeMatMulReduceSource() + "\n "; + return userCode; + }; + return MatMulReduceProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function makeMatMulSmallOutputSizeSource(workGroupSize) { + var tileAOuter = workGroupSize[1] / 2; + var tileBOuter = workGroupSize[0]; + var tileInner = tileAOuter > tileBOuter ? 
tileAOuter : tileBOuter; + return "\n var mm_Asub1 : array, " + tileAOuter + ">;\n var mm_Bsub1 : array, " + tileInner + ">;\n var mm_Asub2 : array, " + tileAOuter + ">;\n var mm_Bsub2 : array, " + tileInner + ">;\n\n // If the output size is small for matrix multiplication, avoid to use vec4\n // and handle some elements per thread to optimally utilize the ALU.\n // Introduces two shared memory buffers, some logical threads could handle\n // arithmetic operations and others handle IO operations between barrier api,\n // makes ALUs and load/store units work simultaneously, could improves\n // the performance.\n " + getMainHeaderString() + "\n let tileRow = i32(localId.y);\n let tileCol = i32(localId.x);\n let globalRow = i32(globalId.y);\n let globalCol = i32(globalId.x);\n\n // uniforms.dimInner should be greater than 0.\n let numTiles = (uniforms.dimInner - 1) / " + tileInner + " + 1;\n var acc = 0.0;\n\n var globalColA = tileCol;\n var globalRowB = tileRow;\n for (var t = 0; t < numTiles; t = t + 1) {\n if (t == 0) {\n if (tileRow < " + tileAOuter + ") {\n // Load one tile of A and B into local memory.\n // globalRow is always greater than or equal tileRow.\n mm_Asub1[tileRow][tileCol] =\n mm_readA((globalRow - tileRow) / 2 + tileRow, globalColA, globalId);\n globalColA = globalColA + " + tileInner + ";\n mm_Bsub1[tileRow][tileCol] = mm_readB(globalRowB, globalCol, globalId);\n globalRowB = globalRowB + " + tileInner + ";\n }\n } else {\n if (tileRow < " + tileAOuter + ") {\n // Load one tile of A and B into local memory.\n // globalRow is always greater than or equal tileRow.\n mm_Asub1[tileRow][tileCol] =\n mm_readA((globalRow - tileRow) / 2 + tileRow, globalColA, globalId);\n globalColA = globalColA + " + tileInner + ";\n mm_Bsub1[tileRow][tileCol] = mm_readB(globalRowB, globalCol, globalId);\n globalRowB = globalRowB + " + tileInner + ";\n } else {\n // Compute acc values for a single thread.\n for (var k = 0; k < " + tileInner + "; k = k + 1) {\n let subRow = tileRow - " + tileAOuter + ";\n if (subRow < 0) {\n continue;\n }\n acc = acc + mm_Asub2[subRow][k] * mm_Bsub2[k][tileCol];\n }\n }\n }\n workgroupBarrier();\n if (t != 0) {\n t = t + 1;\n }\n\n if (t < numTiles) {\n if (tileRow < " + tileAOuter + ") {\n // Load one tile of A and B into local memory.\n // globalRow is always greater than or equal tileRow.\n mm_Asub2[tileRow][tileCol] =\n mm_readA((globalRow - tileRow) / 2 + tileRow, globalColA, globalId);\n globalColA = globalColA + " + tileInner + ";\n mm_Bsub2[tileRow][tileCol] = mm_readB(globalRowB, globalCol, globalId);\n globalRowB = globalRowB + " + tileInner + ";\n } else {\n // Compute acc values for a single thread.\n for (var k = 0; k < " + tileInner + "; k = k + 1) {\n let subRow = tileRow - " + tileAOuter + ";\n if (subRow < 0) {\n continue;\n }\n acc = acc + mm_Asub1[subRow][k] * mm_Bsub1[k][tileCol];\n }\n }\n }\n workgroupBarrier();\n }\n let writeCol = (globalRow - tileRow) / 2 + tileRow - " + tileAOuter + ";\n if (tileRow >= " + tileAOuter + " && writeCol >= 0) {\n mm_write(writeCol, globalCol, acc, globalId);\n }\n }\n "; + } + var MatMulSmallOutputSizeProgram = /** @class */ (function () { + function MatMulSmallOutputSizeProgram(aShape, bShape, outputShape, bias, activation, preluActivationWeights) { + if (bias === void 0) { bias = null; } + if (activation === void 0) { activation = null; } + if (preluActivationWeights === void 0) { preluActivationWeights = null; } + this.variableNames = ['A', 'B']; + this.uniforms = "dimAOuter : i32, dimBOuter : i32, 
dimInner : i32,"; + this.workGroupSize = [8, 16, 1]; + tf.util.assert(aShape[1] <= 16 || bShape[2] <= 16, function () { return 'This program can be only used when A width or B Height are small'; }); + this.outputShape = outputShape; + this.dispatchLayout = { x: [2], y: [1], z: [0] }; + this.dispatch = [ + Math.ceil(outputShape[2] / this.workGroupSize[0]), + Math.ceil(outputShape[1] * 2 / this.workGroupSize[1]), outputShape[0] + ]; + var addBias = bias != null; + if (addBias) { + this.variableNames.push('bias'); + } + var hasPreluActivationWeights = preluActivationWeights != null; + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + this.batchAEqualOne = aShape[0] === 1; + this.batchBEqualOne = bShape[0] === 1; + this.shaderKey = "matMulSmallOutputSize_" + this.activation + "_" + this.batchAEqualOne + "_" + this.batchBEqualOne; + } + MatMulSmallOutputSizeProgram.prototype.getUserCode = function () { + var sampleA = "if (coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch * batchASize + row * uniforms.dimInner + col];\n }\n return 0.0;"; + var sampleB = "if (coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + row * uniforms.dimBOuter + col];\n }\n return 0.0;"; + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, false); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a : f32, outCoord : vec3) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = + "fn activation(a : f32, outCoord : vec3) -> f32 {\n " + activationOp + "\n }"; + } + applyActivationSnippet = 'value = activation(value, outCoord);'; + } + var addBiasSnippet = this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n " + (this.batchAEqualOne ? "\n let batch = 0;\n let batchASize = 0;\n " : + "\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2];\n let batch = i32(globalId.z);\n ") + "\n " + sampleA + "\n }\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n " + (this.batchBEqualOne ? "\n let batch = 0;\n let batchBSize = 0;\n " : + "\n let batch = i32(globalId.z);\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2];\n ") + "\n " + sampleB + "\n }\n fn mm_write(row : i32, col : i32, valueIn : f32, globalId : vec3) {\n if (coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimBOuter))) {\n let batch = i32(globalId.z);\n let outCoord = vec3(batch, row, col);\n var value = valueIn;\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(batch, row, col, value);\n }\n }\n " + makeMatMulSmallOutputSizeSource(this.workGroupSize) + "\n "; + return userCode; + }; + return MatMulSmallOutputSizeProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function reshape(args) { + var inputs = args.inputs, attrs = args.attrs; + var x = inputs.x; + var shape = attrs.shape; + var xSize = tf.util.sizeFromShape(x.shape); + var $shape = tf.util.inferFromImplicitShape(shape, xSize); + var $xSize = tf.util.sizeFromShape($shape); + tf.util.assert(xSize === $xSize, function () { return "The new shape (" + $shape + ") has " + $xSize + " elements and the old " + + ("shape (" + x.shape + ") has " + xSize + " elements. The new shape and old ") + + "shape must have the same number of elements."; }); + // Backend needs to track refCount for the dataId for reshape op + args.backend.incRef(x.dataId); + return { dataId: x.dataId, shape: $shape, dtype: x.dtype }; + } + var reshapeConfig = { + kernelName: tf.Reshape, + backendName: 'webgpu', + kernelFunc: reshape + }; + + function batchMatMulImpl(_a) { + var e_1, _b; + var a = _a.a, b = _a.b, transposeA = _a.transposeA, transposeB = _a.transposeB, backend = _a.backend, _c = _a.bias, bias = _c === void 0 ? null : _c, _d = _a.preluActivationWeights, preluActivationWeights = _d === void 0 ? null : _d, _e = _a.leakyreluAlpha, leakyreluAlpha = _e === void 0 ? 0 : _e, _f = _a.activation, activation = _f === void 0 ? null : _f; + var aRank = a.shape.length; + var bRank = b.shape.length; + var innerShapeA = transposeA ? a.shape[aRank - 2] : a.shape[aRank - 1]; + var innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2]; + var outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2]; + var outerShapeB = transposeB ? b.shape[bRank - 2] : b.shape[bRank - 1]; + var outerDimsA = a.shape.slice(0, -2); + var outerDimsB = b.shape.slice(0, -2); + var batchDimA = tf.util.sizeFromShape(outerDimsA); + var batchDimB = tf.util.sizeFromShape(outerDimsB); + var outShapeOuterDims = tf.broadcast_util.assertAndGetBroadcastShape(a.shape.slice(0, -2), b.shape.slice(0, -2)); + var outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]); + tf.util.assert(innerShapeA === innerShapeB, function () { return "Error in matMul: inner shapes (" + innerShapeA + ") and (" + + (innerShapeB + ") of Tensors with shapes " + a.shape + " and ") + + (b.shape + " and transposeA=" + transposeA) + + (" and transposeB=" + transposeB + " must match."); }); + var a3dShape = transposeA ? + [batchDimA, innerShapeA, outerShapeA] : + [batchDimA, outerShapeA, innerShapeA]; + var b3dShape = transposeB ? 
+ [batchDimB, outerShapeB, innerShapeB] :
+ [batchDimB, innerShapeB, outerShapeB];
+ // The rest of the implementation is designed to operate on rank-3 tensors.
+ var a3d = reshape({ inputs: { x: a }, backend: backend, attrs: { shape: a3dShape } });
+ var b3d = reshape({ inputs: { x: b }, backend: backend, attrs: { shape: b3dShape } });
+ var intermediates = [a3d, b3d];
+ var batchDim = Math.max(batchDimA, batchDimB);
+ var batchAEqualOne = batchDimA === 1;
+ var batchBEqualOne = batchDimB === 1;
+ var useVec4 = innerShapeA % 4 === 0 && outerShapeB % 4 === 0 &&
+ !transposeA && !transposeB;
+ var program;
+ if (outerShapeA * outerShapeB <= 32) {
+ program = new MatMulReduceProgram([batchDim, outerShapeA, outerShapeB], batchAEqualOne, batchBEqualOne, transposeA, transposeB, bias, activation, preluActivationWeights);
+ }
+ else
+ // When the output size is absolutely small or relatively small, we may
+ // use MatMulSmallOutputSizeProgram to get better performance. Absolutely
+ // small size means that the output size is smaller than [16, 512].
+ // Relatively small size means that one dimension size of the output is
+ // smaller than 16, and the output size is also at least two
+ // times smaller than each of the two input sizes. For example, if input
+ // sizes are [12, 2048] and [2048, 1024], the output size is [12, 1024],
+ // which is relatively small compared to input sizes.
+ if (!transposeA && !transposeB &&
+ ((outerShapeA <= 16 &&
+ (outerShapeB <= 512 || innerShapeB >= 2 * outerShapeB)) ||
+ (outerShapeB <= 16 &&
+ (outerShapeA <= 512 || innerShapeA >= 2 * outerShapeA)))) {
+ program = new MatMulSmallOutputSizeProgram(a3dShape, b3dShape, [batchDim, outerShapeA, outerShapeB], bias, activation, preluActivationWeights);
+ }
+ else if (useVec4) {
+ // TODO: Currently we need to make sure that innerShapeA and outerShapeB
+ // are divisible by 4 since we use vec4 to get data. In the future, we can
+ // remove this limitation by inserting 0 to pack data.
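+ // useVec4 above requires innerShapeA % 4 === 0 and outerShapeB % 4 === 0 with
+ // no transposes, so every vec4 load from A and B and every vec4 store to the
+ // output stays aligned. For example, [1, 256, 1024] x [1, 1024, 512] takes
+ // this branch (1024 % 4 === 0 and 512 % 4 === 0), while
+ // [1, 256, 1023] x [1, 1023, 512] falls through to MatMulPackedProgram below.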
+ program = new MatMulPackedVec4Program(a3dShape, [batchDim, outerShapeA, outerShapeB], tf.env().get('WEBGPU_MATMUL_WORK_PER_THREAD'), batchAEqualOne, batchBEqualOne, bias, activation, preluActivationWeights); + } + else { + program = new MatMulPackedProgram(a3dShape, [batchDim, outerShapeA, outerShapeB], tf.env().get('WEBGPU_MATMUL_WORK_PER_THREAD'), batchAEqualOne, batchBEqualOne, transposeA, transposeB, bias, activation, preluActivationWeights); + } + var inputs = [a3d, b3d]; + if (bias) { + inputs.push(bias); + } + if (preluActivationWeights) { + inputs.push(preluActivationWeights); + } + var dimensions = [ + { type: 'int32', data: [outerShapeA] }, { type: 'int32', data: [outerShapeB] }, + { type: 'int32', data: [innerShapeA] } + ]; + if (activation === 'leakyrelu') { + dimensions.push({ type: 'float32', data: [leakyreluAlpha] }); + program.uniforms += ' alpha : f32,'; + } + var out = backend.runWebGPUProgram(program, inputs, a.dtype, dimensions); + var outReshaped = reshape({ inputs: { x: out }, backend: backend, attrs: { shape: outShape } }); + intermediates.push(out); + try { + for (var intermediates_1 = __values(intermediates), intermediates_1_1 = intermediates_1.next(); !intermediates_1_1.done; intermediates_1_1 = intermediates_1.next()) { + var i = intermediates_1_1.value; + backend.disposeData(i.dataId); + } + } + catch (e_1_1) { e_1 = { error: e_1_1 }; } + finally { + try { + if (intermediates_1_1 && !intermediates_1_1.done && (_b = intermediates_1.return)) _b.call(intermediates_1); + } + finally { if (e_1) throw e_1.error; } + } + return outReshaped; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function _fusedMatMul(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var a = inputs.a, b = inputs.b, bias = inputs.bias, preluActivationWeights = inputs.preluActivationWeights; + var transposeA = attrs.transposeA, transposeB = attrs.transposeB, activation = attrs.activation, leakyreluAlpha = attrs.leakyreluAlpha; + return batchMatMulImpl({ + a: a, + b: b, + transposeA: transposeA, + transposeB: transposeB, + backend: backend, + bias: bias, + preluActivationWeights: preluActivationWeights, + leakyreluAlpha: leakyreluAlpha, + activation: activation + }); + } + var _fusedMatMulConfig = { + kernelName: tf._FusedMatMul, + backendName: 'webgpu', + kernelFunc: _fusedMatMul, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BinaryOpComplexProgram = /** @class */ (function () { + function BinaryOpComplexProgram(op, aShape, bShape) { + this.variableNames = ['AReal', 'AImag', 'BReal', 'BImag']; + this.workGroupSize = [128, 1, 1]; + this.size = true; + this.outputShape = tf.backend_util.assertAndGetBroadcastShape(aShape, bShape); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "binaryOpComplex_" + op; + this.op = op; + } + BinaryOpComplexProgram.prototype.getUserCode = function () { + var opStr = getBinaryOpString(this.op, false); + var userCode = "\n fn binaryOpComplex(\n areal : f32, aimag : f32, breal : f32, bimag : f32) -> f32 {\n " + opStr + "\n }\n\n " + getMainHeaderAndGlobalIndexString() + "\n if(index < uniforms.size) {\n let areal = getARealByOutputIndex(index);\n let aimag = getAImagByOutputIndex(index);\n let breal = getBRealByOutputIndex(index);\n let bimag = getBImagByOutputIndex(index);\n setOutputAtIndex(index, binaryOpComplex(areal, aimag, breal, bimag));\n }\n }\n "; + return userCode; + }; + return BinaryOpComplexProgram; + }()); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BinaryOpSharedProgram = /** @class */ (function () { + function BinaryOpSharedProgram(op, aShape, bShape, useSharedMemoryWithB) { + this.variableNames = ['A', 'B']; + this.size = true; + // This is an experimental value when using shared memory. + // Note that the maximum of workgroup X dimension is 256. + var workGroupSizeX = 256; + this.workGroupSize = [workGroupSizeX, 1, 1]; + this.outputShape = tf.backend_util.assertAndGetBroadcastShape(aShape, bShape); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.lastDimensionSize = useSharedMemoryWithB ? bShape[0] : aShape[0]; + if (this.lastDimensionSize < 256) { + this.workPerThread = 1; + } + else if (this.lastDimensionSize < 512) { + this.workPerThread = 2; + } + else { + this.workPerThread = 4; + } + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.useSharedMemoryWithB = useSharedMemoryWithB; + this.op = op; + // this.lastDimensionSize is used as sharedBuf array size, so can not be + // used as uniform. 
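+ // Instead it is interpolated directly into the shader source in getUserCode,
+ // so each distinct shared-buffer size (and which operand is shared) produces
+ // its own shader variant; workPerThread above (1, 2 or 4) grows with that
+ // size so larger 1-D operands amortize more output elements per invocation.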
+ this.shaderKey = "binaryShared_" + op + "_" + this.lastDimensionSize + "_" + this.useSharedMemoryWithB; + } + BinaryOpSharedProgram.prototype.getUserCode = function () { + var sharedIndexSnippet = this.lastDimensionSize > 1 ? + "coords[" + (this.outputShape.length - 1) + "]" : + '0'; + var accessDataSnippet = this.useSharedMemoryWithB ? + "let a = getAByOutputCoords(coords);\n let b = sharedBuf[" + sharedIndexSnippet + "];" : + "let a = sharedBuf[" + sharedIndexSnippet + "];\n let b = getBByOutputCoords(coords);"; + var opStr = getBinaryOpString(this.op, false); + var userCode = "\n fn binaryOperation(a : f32, b : f32) -> f32 {\n " + opStr + "\n }\n var sharedBuf : array;\n " + getMainHeaderAndGlobalIndexString() + "\n\n // Fill in the shared memory buffer. Here we need a loop to make sure\n // that all data in A|B are uploaded when |sharedMemorySize| is larger\n // than work group size.\n for(var localIndex = i32(localId.x); localIndex < " + this.lastDimensionSize + "; localIndex = localIndex + " + this.workGroupSize[0] + ") {\n sharedBuf[localIndex] = f32(" + (this.useSharedMemoryWithB ? 'B' : 'A') + "[localIndex]);\n }\n workgroupBarrier();\n\n for(var i = 0; i < " + this.workPerThread + "; i = i + 1) {\n let flatIndex = index * " + this.workPerThread + " + i;\n if(flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndex);\n\n " + accessDataSnippet + "\n setOutputAtIndex(flatIndex, binaryOperation(a, b));\n }\n }\n }\n "; + return userCode; + }; + return BinaryOpSharedProgram; + }()); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BinaryOpVec4Program = /** @class */ (function () { + function BinaryOpVec4Program(op, aShape, bShape) { + this.variableNames = ['A', 'B']; + this.workPerThread = 4; + this.isVec4 = true; + this.size = true; + // TODO(jiajia.qin@intel.com): Heuristically select a good work group size. + var workGroupSizeX = 128; + this.workGroupSize = [workGroupSizeX, 1, 1]; + this.outputShape = tf.backend_util.assertAndGetBroadcastShape(aShape, bShape); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.op = op; + this.shaderKey = "binaryVec4_" + op; + } + BinaryOpVec4Program.prototype.getUserCode = function () { + var opStr = getBinaryOpString(this.op, this.isVec4); + var userCode = "\n fn binaryOperation(a : vec4, b : vec4) -> vec4 {\n " + opStr + "\n }\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let a = getAByOutputIndex(index);\n let b = getBByOutputIndex(index);\n setOutputAtIndex(index, binaryOperation(a, b));\n }\n }\n "; + return userCode; + }; + return BinaryOpVec4Program; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BinaryOpProgram = /** @class */ (function () { + function BinaryOpProgram(op, aShape, bShape) { + this.variableNames = ['A', 'B']; + this.size = true; + // TODO(jiajia.qin@intel.com): Heuristically select a good work group size. + var workGroupSizeX = 128; + this.workGroupSize = [workGroupSizeX, 1, 1]; + this.outputShape = tf.backend_util.assertAndGetBroadcastShape(aShape, bShape); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "binary_" + op; + this.op = op; + } + BinaryOpProgram.prototype.getUserCode = function () { + var opStr = getBinaryOpString(this.op, false); + var userCode = "\n fn binaryOperation(a : f32, b : f32) -> f32 {\n " + opStr + "\n }\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let a = getAByOutputIndex(index);\n let b = getBByOutputIndex(index);\n setOutputAtIndex(index, binaryOperation(a, b));\n }\n }\n "; + return userCode; + }; + return BinaryOpProgram; + }()); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function getBinaryProgram(op, aShape, bShape) { + var useVec4 = tf.util.arraysEqual(aShape, bShape) && tf.util.sizeFromShape(aShape) % 4 === 0; + if (useVec4) { + return new BinaryOpVec4Program(op, aShape, bShape); + } + var useSharedMemoryWithA = aShape.length === 1 && bShape.length > 1 && aShape[0] < 1024; + var useSharedMemoryWithB = bShape.length === 1 && aShape.length > 1 && bShape[0] < 1024; + if (useSharedMemoryWithA || useSharedMemoryWithB) { + return new BinaryOpSharedProgram(op, aShape, bShape, useSharedMemoryWithB); + } + else { + return new BinaryOpProgram(op, aShape, bShape); + } + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function identity(args) { + var inputs = args.inputs; + var x = inputs.x; + args.backend.incRef(x.dataId); + return { dataId: x.dataId, shape: x.shape, dtype: x.dtype }; + } + var identityConfig = { + kernelName: tf.Identity, + backendName: 'webgpu', + kernelFunc: identity + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Complex tensors share data with their real and imaginary components. Complex + * tensors' reference to the components is tracked by refCount on the individual + * component. The refCounts are increased by the identity call. + * + * When a complex tensor is disposed, it will reduce the refCount on the + * components by calling disposeData on each. + */ + function complex(args) { + var inputs = args.inputs, backend = args.backend; + var real = inputs.real, imag = inputs.imag; + var complexInfo = backend.makeTensorInfo(real.shape, 'complex64'); + var complex = backend.tensorMap.get(complexInfo.dataId); + var realTensorInfo = identity({ inputs: { x: real }, backend: backend }); + var imagTensorInfo = identity({ inputs: { x: imag }, backend: backend }); + complex.complexTensorInfos = { real: realTensorInfo, imag: imagTensorInfo }; + return complexInfo; + } + var complexConfig = { + kernelName: tf.Complex, + backendName: 'webgpu', + kernelFunc: complex + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var UnaryOpProgram = /** @class */ (function () { + function UnaryOpProgram(outputShape, op) { + this.variableNames = ['A']; + this.size = true; + // TODO(jiajia.qin@intel.com): Heuristically select a good work group size. 
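+ // For now a flat 128-wide work group is used; flatDispatchLayout below maps
+ // the whole output onto the x dimension and each invocation writes a single
+ // element, guarded by the `index < uniforms.size` check in getUserCode.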
+ var workGroupSizeX = 128; + this.workGroupSize = [workGroupSizeX, 1, 1]; + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.op = op; + this.shaderKey = "unary_" + op; + } + UnaryOpProgram.prototype.getUserCode = function () { + return "\n fn unaryOperation(a : f32) -> f32 {\n " + getUnaryOpString(this.op, false) + "\n }\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let a = getAByOutputIndex(index);\n setOutputAtIndex(index, unaryOperation(a));\n }\n }\n "; + }; + return UnaryOpProgram; + }()); + + /** + * Template that creates a `KernelFunc` for unary ops. + * @param opSnippet Op snippet to create `UnaryOpProgram`. + * @param cpuKernelImpl Optional. Shared functionality from tfjs-backend-cpu, it + * will be involved when necessary. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the first input. This is mainly used in + * comparison kernels, such as Equal, Less, Greater, etc. + */ + function unaryKernelFunc(_a) { + var opType = _a.opType, cpuKernelImpl = _a.cpuKernelImpl, dtype = _a.dtype; + return function (_a) { + var inputs = _a.inputs, backend = _a.backend; + var x = inputs.x; + var webgpuBackend = backend; + var $dtype = dtype || x.dtype; + if (webgpuBackend.shouldExecuteOnCPU([x]) && cpuKernelImpl != null) { + var xData = webgpuBackend.tensorMap.get(x.dataId); + var outValues = cpuKernelImpl(xData.values, $dtype); + return webgpuBackend.makeTensorInfo(x.shape, $dtype, outValues); + } + var program = new UnaryOpProgram(x.shape, opType); + return webgpuBackend.runWebGPUProgram(program, [x], $dtype); + }; + } + /** + * Template that creates a `KernelFunc` for binary ops. + * @param opSnippet Op snippet to create `BinaryOpProgram`. + * @param cpuKernelImpl Optional. Shared functionality from tfjs-backend-cpu, it + * will be involved when necessary. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the first input. This is mainly used in + * comparison kernels, such as Equal, Less, Greater, etc. + */ + function binaryKernelFunc(_a) { + var opSnippet = _a.opSnippet, cpuKernelImpl = _a.cpuKernelImpl, _b = _a.supportsComplex, supportsComplex = _b === void 0 ? 
false : _b, dtype = _a.dtype; + return function (_a) { + var _b; + var inputs = _a.inputs, backend = _a.backend; + var a = inputs.a, b = inputs.b; + var webgpuBackend = backend; + if (supportsComplex && a.dtype === 'complex64') { + var aData = webgpuBackend.tensorMap.get(a.dataId); + var bData = webgpuBackend.tensorMap.get(b.dataId); + var real = void 0, imag = void 0; + if (opSnippet !== BinaryOpType.MUL) { + _b = __read([ + [aData.complexTensorInfos.real, bData.complexTensorInfos.real], + [aData.complexTensorInfos.imag, bData.complexTensorInfos.imag] + ].map(function (complexParts) { + var _a = __read(complexParts, 2), aPart = _a[0], bPart = _a[1]; + var aHandle = { + dataId: aPart.dataId, + dtype: aPart.dtype, + shape: a.shape + }; + var bHandle = { + dataId: bPart.dataId, + dtype: bPart.dtype, + shape: b.shape + }; + var program = getBinaryProgram(opSnippet, a.shape, b.shape); + return webgpuBackend.runWebGPUProgram(program, [aHandle, bHandle], tf.upcastType(aPart.dtype, bPart.dtype)); + }), 2), real = _b[0], imag = _b[1]; + } + else { + var realProgram = new BinaryOpComplexProgram(BinaryOpType.COMPLEX_MULTIPLY_REAL, a.shape, b.shape); + var imagProgram = new BinaryOpComplexProgram(BinaryOpType.COMPLEX_MULTIPLY_IMAG, a.shape, b.shape); + var inputs_1 = [ + { + dataId: aData.complexTensorInfos.real.dataId, + dtype: aData.complexTensorInfos.real.dtype, + shape: a.shape + }, + { + dataId: aData.complexTensorInfos.imag.dataId, + dtype: aData.complexTensorInfos.imag.dtype, + shape: a.shape + }, + { + dataId: bData.complexTensorInfos.real.dataId, + dtype: bData.complexTensorInfos.real.dtype, + shape: b.shape + }, + { + dataId: bData.complexTensorInfos.imag.dataId, + dtype: bData.complexTensorInfos.imag.dtype, + shape: b.shape + } + ]; + real = webgpuBackend.runWebGPUProgram(realProgram, inputs_1, 'float32'); + imag = webgpuBackend.runWebGPUProgram(imagProgram, inputs_1, 'float32'); + } + var complexOutput = complex({ inputs: { real: real, imag: imag }, backend: webgpuBackend }); + webgpuBackend.disposeData(real.dataId); + webgpuBackend.disposeData(imag.dataId); + // TODO: Implement CPU forwarding for complex inputs. + return complexOutput; + } + var $dtype = dtype || tf.upcastType(a.dtype, b.dtype); + if ((a.dtype === 'string' || b.dtype === 'string' || + webgpuBackend.shouldExecuteOnCPU([a, b])) && + cpuKernelImpl != null) { + var aData = webgpuBackend.tensorMap.get(a.dataId).values; + var bData = webgpuBackend.tensorMap.get(b.dataId).values; + var decodedAVals = a.dtype === 'string' ? + // tslint:disable-next-line: no-any + tf.backend_util.fromUint8ToStringArray(aData) : + aData; + var decodedBVals = a.dtype === 'string' ? + // tslint:disable-next-line: no-any + tf.backend_util.fromUint8ToStringArray(bData) : + bData; + var _c = __read(cpuKernelImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype), 2), outValues = _c[0], outShape = _c[1]; + return webgpuBackend.makeTensorInfo(outShape, $dtype, outValues); + } + var program = getBinaryProgram(opSnippet, a.shape, b.shape); + return webgpuBackend.runWebGPUProgram(program, [a, b], $dtype); + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function simpleAbsImpl(vals) { + var resultValues = new Float32Array(vals.length); + for (var i = 0; i < vals.length; ++i) { + resultValues[i] = Math.abs(vals[i]); + } + return resultValues; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Template that creates implementation for binary ops. Supports broadcast. + */ + function createSimpleBinaryKernelImpl(op) { + return function (aShape, bShape, aVals, bVals, dtype) { + var newShape = tf.backend_util.assertAndGetBroadcastShape(aShape, bShape); + var resultRank = newShape.length; + var resultStrides = tf.util.computeStrides(newShape); + var resultSize = tf.util.sizeFromShape(newShape); + var result = tf.util.getTypedArrayFromDType(dtype, resultSize); + var aRank = aShape.length; + var bRank = bShape.length; + var aStrides = tf.util.computeStrides(aShape); + var bStrides = tf.util.computeStrides(bShape); + var aBroadcastDims = tf.backend_util.getBroadcastDims(aShape, newShape); + var bBroadcastDims = tf.backend_util.getBroadcastDims(bShape, newShape); + if (aBroadcastDims.length + bBroadcastDims.length === 0) { + for (var i = 0; i < result.length; ++i) { + result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]); + } + } + else { + var _loop_1 = function (i) { + var loc = tf.util.indexToLoc(i, resultRank, resultStrides); + var aLoc = loc.slice(-aRank); + aBroadcastDims.forEach(function (d) { return aLoc[d] = 0; }); + var aIndex = tf.util.locToIndex(aLoc, aRank, aStrides); + var bLoc = loc.slice(-bRank); + bBroadcastDims.forEach(function (d) { return bLoc[d] = 0; }); + var bIndex = tf.util.locToIndex(bLoc, bRank, bStrides); + result[i] = op(aVals[aIndex], bVals[bIndex]); + }; + for (var i = 0; i < result.length; ++i) { + _loop_1(i); + } + } + return [result, newShape]; + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var addImpl = createSimpleBinaryKernelImpl((function (a, b) { return a + b; })); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Template that creates implementation for unary op. + */ + function createSimpleUnaryImpl(op) { + return function (values, dtype, attrs) { + var newValues = tf.util.getTypedArrayFromDType(dtype, values.length); + for (var i = 0; i < values.length; ++i) { + newValues[i] = op(values[i], attrs); + } + return newValues; + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ceilImpl = createSimpleUnaryImpl(function (xi) { return Math.ceil(xi); }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function concatImpl$1(inputs, outShape, dtype, simplyConcat) { + var outVals = tf.util.getArrayFromDType(dtype, tf.util.sizeFromShape(outShape)); + if (simplyConcat && dtype !== 'string') { + // Use built-in TypedArray.set() method for speed. + var offset_1 = 0; + inputs.forEach(function (input) { + var size = tf.util.sizeFromShape(input.shape); + outVals.set(input.vals, offset_1); + offset_1 += size; + }); + } + else { + var colOffset_1 = 0; + inputs.forEach(function (input) { + var decodedData = dtype === 'string' ? 
+ tf.backend_util.fromUint8ToStringArray(input.vals) : + input.vals; + var tIdx = 0; + for (var row = 0; row < input.shape[0]; ++row) { + var resIdx = row * outShape[1] + colOffset_1; + for (var col = 0; col < input.shape[1]; ++col) { + outVals[resIdx + col] = decodedData[tIdx++]; + } + } + colOffset_1 += input.shape[1]; + }); + } + return outVals; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var equalImpl = createSimpleBinaryKernelImpl(function (a, b) { return (a === b) ? 1 : 0; }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var expImpl = createSimpleUnaryImpl(function (xi) { return Math.exp(xi); }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var expm1Impl = createSimpleUnaryImpl(function (xi) { return Math.expm1(xi); }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var floorImpl = createSimpleUnaryImpl(function (xi) { return Math.floor(xi); }); + + function gatherNdImpl(indicesData, paramsBuf, dtype, numSlices, sliceRank, sliceSize, strides, paramsShape, paramsSize) { + var outBuf = tf.buffer([numSlices, sliceSize], dtype); + for (var i = 0; i < numSlices; i++) { + var index = []; + var flattenIndex = 0; + for (var j = 0; j < sliceRank; j++) { + var dim = indicesData[i * sliceRank + j]; + flattenIndex += dim * strides[j]; + index.push(dim); + } + if (flattenIndex < 0 || flattenIndex >= paramsSize / sliceSize) { + throw new Error("Invalid indices: " + index + " does not index into " + paramsShape); + } + for (var k = 0; k < sliceSize; k++) { + outBuf.values[i * sliceSize + k] = paramsBuf.get.apply(paramsBuf, __spread(paramsBuf.indexToLoc(flattenIndex * sliceSize + k))); + } + } + return outBuf; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function gatherV2Impl(xBuf, indicesBuf, flattenOutputShape) { + var outBuf = tf.buffer(flattenOutputShape, xBuf.dtype); + for (var i = 0; i < outBuf.size; ++i) { + var newLoc = outBuf.indexToLoc(i); + var originalLoc = newLoc.slice(); + var batchIdx = originalLoc[0]; + var indicesIdx = originalLoc[2]; + var indicesIndex = indicesBuf.locToIndex([batchIdx, indicesIdx]); + originalLoc[2] = indicesBuf.values[indicesIndex]; + var originalIndex = xBuf.locToIndex(originalLoc); + if (0 <= originalIndex && originalIndex < xBuf.values.length) { + outBuf.values[i] = xBuf.values[originalIndex]; + } // Else, index is out of bounds, so leave the default zero val in outBuf. + } + return outBuf; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var greaterImpl = createSimpleBinaryKernelImpl(function (a, b) { return (a > b) ? 1 : 0; }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var greaterEqualImpl = createSimpleBinaryKernelImpl(function (a, b) { return (a >= b) ? 1 : 0; }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var lessImpl = createSimpleBinaryKernelImpl(function (a, b) { return (a < b) ? 1 : 0; }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var lessEqualImpl = createSimpleBinaryKernelImpl(function (a, b) { return (a <= b) ? 1 : 0; }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var logImpl = createSimpleUnaryImpl(function (xi) { return Math.log(xi); }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxImpl(aVals, reduceSize, outShape, dtype) { + var vals = tf.util.getTypedArrayFromDType(dtype, tf.util.sizeFromShape(outShape)); + for (var i = 0; i < vals.length; ++i) { + var offset = i * reduceSize; + var max = aVals[offset]; + for (var j = 0; j < reduceSize; ++j) { + var value = aVals[offset + j]; + if (Number.isNaN(value) || + value > max) { // comparison with NaN always return false + max = value; + } + } + vals[i] = max; + } + return vals; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var maximumImpl = createSimpleBinaryKernelImpl((function (aValue, bValue) { return Math.max(aValue, bValue); })); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var minimumImpl = createSimpleBinaryKernelImpl((function (aValue, bValue) { return Math.min(aValue, bValue); })); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var multiplyImpl = createSimpleBinaryKernelImpl((function (aValue, bValue) { return aValue * bValue; })); + + function negImpl(xVals, xShape, xDtype) { + var minusOne = tf.util.createScalarValue(-1, xDtype); + return multiplyImpl([], xShape, minusOne, xVals, xDtype); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var notEqualImpl = createSimpleBinaryKernelImpl((function (a, b) { return (a !== b) ? 1 : 0; })); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function transposeImpl(xVals, xShape, dtype, perm, newShape) { + var xRank = xShape.length; + var xSize = tf.util.sizeFromShape(xShape); + var xStrides = tf.util.computeStrides(xShape); + var newStrides = tf.util.computeStrides(newShape); + var result = tf.util.getTypedArrayFromDType(dtype, tf.util.sizeFromShape(newShape)); + for (var i = 0; i < xSize; ++i) { + var loc = tf.util.indexToLoc(i, xRank, xStrides); + // Permute location. + var newLoc = new Array(loc.length); + for (var i_1 = 0; i_1 < newLoc.length; i_1++) { + newLoc[i_1] = loc[perm[i_1]]; + } + var newIndex = tf.util.locToIndex(newLoc, xRank, newStrides); + result[newIndex] = xVals[i]; + } + return result; + } + + function prodImpl(xShape, xDtype, xVals, reductionAxes) { + var _a = __read(tf.backend_util.computeOutAndReduceShapes(xShape, reductionAxes), 2), outShape = _a[0], reduceShape = _a[1]; + var outDtype = tf.upcastType(xDtype, 'int32'); + var outVals = tf.util.makeZerosTypedArray(tf.util.sizeFromShape(outShape), outDtype); + var reduceSize = tf.util.sizeFromShape(reduceShape); + for (var i = 0; i < outVals.length; ++i) { + var offset = i * reduceSize; + var prod_1 = 1; + for (var j = 0; j < reduceSize; ++j) { + prod_1 *= xVals[offset + j]; + } + outVals[i] = prod_1; + } + return { outVals: outVals, outShape: outShape, outDtype: outDtype }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function rangeImpl(start, stop, step, dtype) { + var sameStartStop = start === stop; + var increasingRangeNegativeStep = start < stop && step < 0; + var decreasingRangePositiveStep = stop < start && step > 1; + if (sameStartStop || increasingRangeNegativeStep || + decreasingRangePositiveStep) { + return tf.util.makeZerosTypedArray(0, dtype); + } + var numElements = Math.abs(Math.ceil((stop - start) / step)); + var values = tf.util.makeZerosTypedArray(numElements, dtype); + if (stop < start && step === 1) { + // Auto adjust the step's sign if it hasn't been set + // (or was set to 1) + step = -1; + } + values[0] = start; + for (var i = 1; i < values.length; i++) { + values[i] = values[i - 1] + step; + } + return values; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var rsqrtImpl = createSimpleUnaryImpl(function (xi) { return 1 / Math.sqrt(xi); }); + + function sliceImpl(vals, begin, size, shape, dtype) { + var isContinous = tf.slice_util.isSliceContinous(shape, begin, size); + var length = tf.util.sizeFromShape(size); + var xStrides = tf.util.computeStrides(shape); + if (isContinous) { + var flatOffset = tf.slice_util.computeFlatOffset(begin, xStrides); + if (dtype === 'string') { + return vals.slice(flatOffset, flatOffset + length); + } + return vals.subarray(flatOffset, flatOffset + length); + } + var decodedData = dtype === 'string' ? + tf.backend_util.fromUint8ToStringArray(vals) : + vals; + var inBuf = tf.buffer(shape, dtype, decodedData); + var outBuf = tf.buffer(size, dtype); + for (var i = 0; i < outBuf.size; ++i) { + var outLoc = outBuf.indexToLoc(i); + var inLoc = outLoc.map(function (idx, j) { return idx + begin[j]; }); + outBuf.set.apply(outBuf, __spread([inBuf.get.apply(inBuf, __spread(inLoc))], outLoc)); + } + if (dtype === 'string') { + return tf.backend_util.fromStringArrayToUint8(outBuf.values); + } + return outBuf.values; + } + + function stridedSliceImpl(outShape, xBuf, strides, begin) { + var outBuf = tf.buffer(outShape, xBuf.dtype); + for (var i = 0; i < outBuf.size; i++) { + var loc = outBuf.indexToLoc(i); + var newLoc = new Array(loc.length); + for (var j = 0; j < newLoc.length; j++) { + newLoc[j] = loc[j] * strides[j] + begin[j]; + } + outBuf.set.apply(outBuf, __spread([xBuf.get.apply(xBuf, __spread(newLoc))], loc)); + } + return outBuf; + } + + /** + * @license + * Copyright 2021 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * The StringNGramsOp class creates ngrams from ragged string data. + * The constructor contains all attributes related to the operation such as + * padding widths and strings, and the compute function can be used to + * compute the ngrams for different ragged tensor inputs. + */ + var StringNGramsOp = /** @class */ (function () { + function StringNGramsOp(separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) { + this.separator = tf.util.encodeString(separator); + this.nGramWidths = nGramWidths; + this.leftPad = tf.util.encodeString(leftPad); + this.rightPad = tf.util.encodeString(rightPad); + this.padWidth = padWidth; + this.preserveShort = preserveShortSequences; + } + StringNGramsOp.prototype.getPadWidth = function (nGramWidth) { + // Ngrams can be padded with either a fixed pad width or a dynamic pad + // width depending on the 'padWidth' arg, but in no case should the padding + // ever be wider than 'nGramWidth' - 1. + return Math.min(this.padWidth < 0 ? nGramWidth - 1 : this.padWidth, nGramWidth - 1); + }; + StringNGramsOp.prototype.getNumNGrams = function (length, nGramWidth) { + var padWidth = this.getPadWidth(nGramWidth); + return Math.max(0, ((length + 2 * padWidth) - nGramWidth) + 1); + }; + StringNGramsOp.prototype.createNGrams = function (data, splitIndex, output, outputStartIndex, numNGrams, nGramWidth) { + var _loop_1 = function (nGramIndex) { + var padWidth = this_1.getPadWidth(nGramWidth); + var leftPadding = Math.max(0, padWidth - nGramIndex); + var rightPadding = Math.max(0, padWidth - (numNGrams - (nGramIndex + 1))); + var numTokens = nGramWidth - (leftPadding + rightPadding); + var dataStartIndex = splitIndex + (leftPadding > 0 ? 0 : nGramIndex - padWidth); + // Calculate the total expected size of the nGram so we can reserve the + // correct amount of space in the string. + var nGramSize = 0; + // Size of the left padding. + nGramSize += leftPadding * this_1.leftPad.length; + // Size of the tokens. + for (var n = 0; n < numTokens; ++n) { + nGramSize += data[dataStartIndex + n].length; + } + // Size of the right padding. + nGramSize += rightPadding * this_1.rightPad.length; + // Size of the separators. + var numSeparators = leftPadding + rightPadding + numTokens - 1; + nGramSize += numSeparators * this_1.separator.length; + // Build the nGram. 
+ output[outputStartIndex + nGramIndex] = new Uint8Array(nGramSize); + var nGram = output[outputStartIndex + nGramIndex]; + var nextNGramIndex = 0; + var appendToNGram = function (str) { return str.forEach(function (value) { return nGram[nextNGramIndex++] = value; }); }; + for (var n = 0; n < leftPadding; ++n) { + appendToNGram(this_1.leftPad); + appendToNGram(this_1.separator); + } + // Only output first numTokens - 1 pairs of data and separator + for (var n = 0; n < numTokens - 1; ++n) { + appendToNGram(data[dataStartIndex + n]); + appendToNGram(this_1.separator); + } + // Handle case when there are no tokens or no right padding as these + // can result in consecutive separators. + if (numTokens > 0) { + // If we have tokens, then output last and then pair each separator + // with the right padding that follows, to ensure nGram ends either with + // the token or with the right pad. + appendToNGram(data[dataStartIndex + numTokens - 1]); + for (var n = 0; n < rightPadding; ++n) { + appendToNGram(this_1.separator); + appendToNGram(this_1.rightPad); + } + } + else { + // If we don't have tokens, then the last item inserted into the nGram + // has been the separator from the left padding loop above. Hence, + // output right pad and separator and make sure to finish with a + // padding, not a separator. + for (var n = 0; n < rightPadding - 1; ++n) { + appendToNGram(this_1.rightPad); + appendToNGram(this_1.separator); + } + appendToNGram(this_1.rightPad); + } + }; + var this_1 = this; + for (var nGramIndex = 0; nGramIndex < numNGrams; ++nGramIndex) { + _loop_1(nGramIndex); + } + }; + // Data and splits together form the definition of the ragged tensor, + // where data is 1 dimensional and contains the values of the tensor + // and splits denotes the indices at which each row starts. + StringNGramsOp.prototype.compute = function (data, splits) { + var _this = this; + // Validate that the splits are valid indices into data, only if there are + // splits specified. + var inputDataSize = data.length; + var splitsSize = splits.length; + if (splitsSize > 0) { + var prevSplit = splits[0]; + if (prevSplit !== 0) { + throw new Error("First split value must be 0, got " + prevSplit); + } + for (var i = 1; i < splitsSize; ++i) { + var validSplits = splits[i] >= prevSplit; + validSplits = validSplits && (splits[i] <= inputDataSize); + if (!validSplits) { + throw new Error("Invalid split value " + splits[i] + ", must be in [" + prevSplit + ", " + inputDataSize + "]"); + } + prevSplit = splits[i]; + } + if (prevSplit !== inputDataSize) { + throw new Error("Last split value must be data size. Expected " + inputDataSize + ", got " + prevSplit); + } + } + var numBatchItems = splitsSize - 1; + var nGramsSplits = tf.util.getArrayFromDType('int32', splitsSize); + // If there is no data or size, return an empty ragged tensor. 
+ if (inputDataSize === 0 || splitsSize === 0) { + var empty = new Array(inputDataSize); + for (var i = 0; i <= numBatchItems; ++i) { + nGramsSplits[i] = 0; + } + return [empty, nGramsSplits]; + } + nGramsSplits[0] = 0; + var _loop_2 = function (i) { + var length = splits[i] - splits[i - 1]; + var numNGrams = 0; + this_2.nGramWidths.forEach(function (nGramWidth) { + numNGrams += _this.getNumNGrams(length, nGramWidth); + }); + if (this_2.preserveShort && length > 0 && numNGrams === 0) { + numNGrams = 1; + } + nGramsSplits[i] = nGramsSplits[i - 1] + numNGrams; + }; + var this_2 = this; + for (var i = 1; i <= numBatchItems; ++i) { + _loop_2(i); + } + var nGrams = new Array(nGramsSplits[numBatchItems]); + var _loop_3 = function (i) { + var splitIndex = splits[i]; + var outputStartIdx = nGramsSplits[i]; + this_3.nGramWidths.forEach(function (nGramWidth) { + var length = splits[i + 1] - splits[i]; + var numNGrams = _this.getNumNGrams(length, nGramWidth); + _this.createNGrams(data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth); + outputStartIdx += numNGrams; + }); + // If we're preserving short sequences, check to see if no sequence was + // generated by comparing the current output start idx to the original + // one (nGramSplitsdata). If no ngrams were generated, then they will + // be equal (since we increment outputStartIdx by numNGrams every + // time we create a set of ngrams.) + if (this_3.preserveShort && outputStartIdx === nGramsSplits[i]) { + var dataLength = splits[i + 1] - splits[i]; + // One legitimate reason to not have any ngrams when this.preserveShort + // is true is if the sequence itself is empty. In that case, move on. + if (dataLength === 0) { + return "continue"; + } + // We don't have to worry about dynamic padding sizes here: if padding + // was dynamic, every sequence would have had sufficient padding to + // generate at least one nGram. + var nGramWidth = dataLength + 2 * this_3.padWidth; + var numNGrams = 1; + this_3.createNGrams(data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth); + } + }; + var this_3 = this; + for (var i = 0; i < numBatchItems; ++i) { + _loop_3(i); + } + return [nGrams, nGramsSplits]; + }; + return StringNGramsOp; + }()); + function stringNGramsImpl(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) { + return new StringNGramsOp(separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) + .compute(data, dataSplits); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var subImpl = createSimpleBinaryKernelImpl((function (aValue, bValue) { return aValue - bValue; })); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * An implementation of the tile kernel shared between webgl and cpu for string + * tensors only. + */ + function tileImpl(xBuf, reps) { + var newShape = new Array(xBuf.rank); + for (var i = 0; i < newShape.length; i++) { + newShape[i] = xBuf.shape[i] * reps[i]; + } + var result = tf.buffer(newShape, xBuf.dtype); + for (var i = 0; i < result.values.length; ++i) { + var newLoc = result.indexToLoc(i); + var originalLoc = new Array(xBuf.rank); + for (var j = 0; j < originalLoc.length; j++) { + originalLoc[j] = newLoc[j] % xBuf.shape[j]; + } + var originalIndex = xBuf.locToIndex(originalLoc); + result.values[i] = xBuf.values[originalIndex]; + } + return result; + } + + var comparePair = function (a, b) { + var valueDiff = b.value - a.value; + return valueDiff === 0 ? a.index - b.index : valueDiff; + }; + /** + * Partitions array where all elements smaller than the (k+1) smallest element + * are found to the left of it, and all larger to the right of it. + * Based on the Floyd-Rivest Algorithm, ref: + * https://en.wikipedia.org/wiki/Floyd%E2%80%93Rivest_algorithm + * @param array: Array to partition + * @param left: Left index for the interval + * @param right: Right index for the interval + * @param k: Desired index value, where array[k] is the (k+1)th smallest element + * when left = 0 + */ + function select$1(array, k, left, right) { + if (left === void 0) { left = 0; } + if (right === void 0) { right = array.length - 1; } + while (right > left) { + // Use select recursively to sample a smaller set of size s + // the arbitrary constants 600 and 0.5 are used in the original + // version to minimize execution time. + if (right - left > 600) { + var n = right - left + 1; + var i_1 = k - left + 1; + var z = Math.log(n); + var s = 0.5 * Math.exp(2 * z / 3); + var sd = 0.5 * Math.sqrt(z * s * (n - s) / n) * Math.sign(i_1 - n / 2); + var newLeft = Math.max(left, Math.floor(k - i_1 * s / n + sd)); + var newRight = Math.min(right, Math.floor(k + (n - i_1) * s / n + sd)); + select$1(array, k, newLeft, newRight); + } + // partition the elements between left and right around t + var t = array[k]; + var i = left; + var j = right; + tf.util.swap(array, left, k); + if (comparePair(array[right], t) > 0) { + tf.util.swap(array, left, right); + } + while (i < j) { + tf.util.swap(array, i, j); + i++; + j--; + while (comparePair(array[i], t) < 0) { + i = i + 1; + } + while (comparePair(array[j], t) > 0) { + j = j - 1; + } + } + if (comparePair(array[left], t) === 0) { + tf.util.swap(array, left, j); + } + else { + j = j + 1; + tf.util.swap(array, j, right); + } + // Adjust left and right towards the boundaries of the subset + // containing the (k - left + 1)th smallest element. + if (j <= k) { + left = j + 1; + } + if (k <= j) { + right = j - 1; + } + } + } + function topKImpl(x, xShape, xDtype, k, sorted) { + // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim. 
+ var lastDim = xShape[xShape.length - 1]; + var _a = __read([x.length / lastDim, lastDim], 2), batch = _a[0], size = _a[1]; + var allTopKVals = tf.util.getTypedArrayFromDType(xDtype, batch * k); + var allTopKIndices = tf.util.getTypedArrayFromDType('int32', batch * k); + var _loop_1 = function (b) { + var offset = b * size; + var vals = x.subarray(offset, offset + size); + var valAndInd = new Array(vals.length); + vals.forEach(function (value, index) { return valAndInd[index] = { value: value, index: index }; }); + if (k < valAndInd.length) { + select$1(valAndInd, k); + valAndInd = valAndInd.slice(0, k); + } + if (sorted) { + valAndInd.sort(comparePair); + } + var outOffset = b * k; + var topKVals = allTopKVals.subarray(outOffset, outOffset + k); + var topKIndices = allTopKIndices.subarray(outOffset, outOffset + k); + for (var i = 0; i < k; i++) { + topKVals[i] = valAndInd[i].value; + topKIndices[i] = valAndInd[i].index; + } + }; + for (var b = 0; b < batch; b++) { + _loop_1(b); + } + // Reshape back to the original input shape, except that the last + // dimension is k. + var outputShape = xShape.slice(); + outputShape[outputShape.length - 1] = k; + return [ + tf.buffer(outputShape, xDtype, allTopKVals), + tf.buffer(outputShape, 'int32', allTopKIndices) + ]; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var addImplCPU = addImpl, ceilImplCPU = ceilImpl, concatImplCPU = concatImpl$1, equalImplCPU = equalImpl, expImplCPU = expImpl, expm1ImplCPU = expm1Impl, floorImplCPU = floorImpl, gatherNdImplCPU = gatherNdImpl, gatherV2ImplCPU = gatherV2Impl, greaterEqualImplCPU = greaterEqualImpl, greaterImplCPU = greaterImpl, lessEqualImplCPU = lessEqualImpl, lessImplCPU = lessImpl, logImplCPU = logImpl, maxImplCPU = maxImpl, maximumImplCPU = maximumImpl, minimumImplCPU = minimumImpl, multiplyImplCPU = multiplyImpl, negImplCPU = negImpl, notEqualImplCPU = notEqualImpl, prodImplCPU = prodImpl, rangeImplCPU = rangeImpl, rsqrtImplCPU = rsqrtImpl, simpleAbsImplCPU = simpleAbsImpl, sliceImplCPU = sliceImpl, stridedSliceImplCPU = stridedSliceImpl, stringNGramsImplCPU = stringNGramsImpl, subImplCPU = subImpl, tileImplCPU = tileImpl, topKImplCPU = topKImpl, transposeImplCPU = transposeImpl; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var abs = unaryKernelFunc({ opType: UnaryOpType.ABS, cpuKernelImpl: simpleAbsImplCPU }); + var absConfig = { + kernelName: tf.Abs, + backendName: 'webgpu', + kernelFunc: abs + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var addKernelFunc = binaryKernelFunc({ + opSnippet: BinaryOpType.ADD, + cpuKernelImpl: addImplCPU, + supportsComplex: true + }); + var addConfig = { + kernelName: tf.Add, + backendName: 'webgpu', + kernelFunc: addKernelFunc + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var AddNPackedProgram = /** @class */ (function () { + function AddNPackedProgram(shapes) { + this.workPerThread = 4; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = shapes[0]; + this.variableNames = shapes.map(function (_, i) { return "T" + i; }); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.shaderKey = 'addN'; + } + AddNPackedProgram.prototype.getUserCode = function () { + var snippets = []; + // Get target elements from every input tensor. + this.variableNames.forEach(function (variable) { + snippets.push("let v" + variable + " = get" + variable + "ByOutputCoords(coords);"); + }); + // Calculate the sum of all elements. + var operation = this.variableNames + .map(function (variable) { + return "v" + variable; + }) + .join(' + '); + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n for (var i = 0; i < " + this.workPerThread + "; i = i + 1) {\n let flatIndex = index * " + this.workPerThread + " + i;\n if (flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndex);\n " + snippets.join('\n ') + "\n setOutputAtIndex(flatIndex, " + operation + ");\n }\n }\n }\n "; + return userCode; + }; + return AddNPackedProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function addN(args) { + var inputs = args.inputs, backend = args.backend; + var tensors = inputs; + if (tensors.length === 1) { + return identity({ inputs: { x: tensors[0] }, backend: backend }); + } + var dtype = tensors.map(function (t) { return t.dtype; }).reduce(function (d1, d2) { return tf.upcastType(d1, d2); }); + var shapes = tensors.map(function (t) { return t.shape; }); + var program = new AddNPackedProgram(shapes); + return backend.runWebGPUProgram(program, tensors, dtype); + } + var addNConfig = { + kernelName: tf.AddN, + backendName: 'webgpu', + kernelFunc: addN + }; + + var ArgMinMaxProgram = /** @class */ (function () { + function ArgMinMaxProgram(inputShape, axis, reduceType) { + this.workGroupSize = [64, 1, 1]; + this.variableNames = ['x']; + this.uniforms = 'infinityValue : f32,'; + this.size = true; + var axes = [axis]; + tf.backend_util.assertAxesAreInnerMostDims('arg' + reduceType.charAt(0).toUpperCase() + reduceType.slice(1), axes, inputShape.length); + this.op = reduceType === 'min' ? '<' : '>'; + // |outShape| is the shape with the removed axis + var _a = __read(tf.backend_util.computeOutAndReduceShapes(inputShape, axes), 1), outputShape = _a[0]; + this.outputShape = outputShape.length === 0 ? [1] : outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + // A work group only outputs a data, so we transfer [1, 1, 1] to compute + // dispatch size. + this.dispatch = + computeDispatch(this.dispatchLayout, this.outputShape, [1, 1, 1]); + this.inputShape = inputShape; + this.shaderKey = "argMinMax" + this.op; + } + ArgMinMaxProgram.prototype.getUserCode = function () { + var _this = this; + var sharedMemorySnippet = "\n var xBestIndices : array;\n var xBestValues : array;\n "; + var getInputShapeLastDim = function () { + if (_this.inputShape.length === 1) { + return 'uniforms.xShape'; + } + else { + return "uniforms.xShape." + getCoordsXYZ(_this.inputShape.length - 1); + } + }; + var splitOutputCoords = function () { + var snippet = ''; + if (_this.outputShape.length === 1) { + if (_this.inputShape.length !== 1) { + snippet += 'outputCoords,'; + } + } + else { + for (var i = 0; i < _this.outputShape.length; i++) { + snippet += "outputCoords." 
+ getCoordsXYZ(i) + ","; + } + } + return snippet; + }; + var userCode = "\n fn DIV_CEIL(a : u32, b : u32) -> u32 {\n return ((a - 1u) / b + 1u);\n }\n\n " + sharedMemorySnippet + "\n\n " + getMainHeaderAndGlobalIndexString() + "\n let outputIndex = index / i32(workGroupSizeX);\n let reduceLength = " + getInputShapeLastDim() + ";\n\n var bestIndex = i32(localId.x);\n var bestValue = uniforms.infinityValue;\n let outputCoords = getCoordsFromIndex(outputIndex);\n for (var k = i32(localId.x); k < reduceLength && outputIndex < uniforms.size;\n k = k + i32(workGroupSizeX)) {\n let candidate = getX(" + splitOutputCoords() + " k);\n if (!isnan(candidate) && candidate " + this.op + " bestValue) {\n bestValue = candidate;\n bestIndex = k;\n }\n }\n xBestValues[localId.x] = bestValue;\n xBestIndices[localId.x] = bestIndex;\n workgroupBarrier();\n\n var reduceSize = min(u32(reduceLength), workGroupSizeX);\n for (var currentSize = reduceSize / 2u; reduceSize > 1u;\n currentSize = reduceSize / 2u) {\n let interval = DIV_CEIL(reduceSize, 2u);\n if (localId.x < currentSize) {\n let candidate = xBestValues[localId.x + interval];\n if (candidate " + this.op + " bestValue) {\n bestValue = candidate;\n xBestValues[localId.x] = bestValue;\n xBestIndices[localId.x] = xBestIndices[localId.x + interval];\n }\n }\n reduceSize = interval;\n workgroupBarrier();\n }\n\n if (localId.x == 0u && outputIndex < uniforms.size) {\n setOutputAtIndexI32(outputIndex, xBestIndices[localId.x]);\n }\n }\n "; + return userCode; + }; + return ArgMinMaxProgram; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var TransposeSharedProgram = /** @class */ (function () { + function TransposeSharedProgram(aShape, newDim) { + this.variableNames = ['A']; + // Note that the maximum number of workgroup invocations by webgpu is 256. 
+ this.workGroupSize = [16, 16, 1]; + var outputShape = new Array(aShape.length); + for (var i = 0; i < outputShape.length; i++) { + outputShape[i] = aShape[newDim[i]]; + } + this.outputShape = outputShape; + this.dispatchLayout = { x: [0], y: [1] }; + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [1, 1, 1]); + this.shaderKey = 'transposeShared'; + } + TransposeSharedProgram.prototype.getUserCode = function () { + var userCode = "\n let TILE_DIM = " + this.workGroupSize[0] + ";\n var tile : array, " + this.workGroupSize[0] + ">;\n " + getWorkGroupSizeString() + "\n fn main(@builtin(local_invocation_id) localId : vec3,\n @builtin(workgroup_id) workgroupId : vec3) {\n var x = i32(workgroupId.x) * TILE_DIM + i32(localId.x);\n var y = i32(workgroupId.y) * TILE_DIM + i32(localId.y);\n let width = uniforms.outShape[0];\n let height = uniforms.outShape[1];\n if (x < width && y < height) {\n tile[localId.y][localId.x] = A[y * width + x];\n }\n workgroupBarrier();\n\n x = i32(workgroupId.y) * TILE_DIM + i32(localId.x);\n y = i32(workgroupId.x) * TILE_DIM + i32(localId.y);\n if (x < height && y < width) {\n setOutputAtIndex((y * height + x), tile[localId.x]\n [localId.y]);\n }\n }\n "; + return userCode; + }; + return TransposeSharedProgram; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var TransposeProgram = /** @class */ (function () { + function TransposeProgram(aShape, newDim) { + this.variableNames = ['A']; + this.workPerThread = 4; + this.workGroupSize = [64, 1, 1]; + this.size = true; + var outputShape = new Array(aShape.length); + for (var i = 0; i < outputShape.length; i++) { + outputShape[i] = aShape[newDim[i]]; + } + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.newDim = newDim; + this.shaderKey = "transpose_" + newDim; + } + TransposeProgram.prototype.getUserCode = function () { + var dtype = getCoordsDataType(this.outputShape.length); + var switched = getSwitchedCoords(this.newDim); + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n\n for(var i = 0; i < " + this.workPerThread + "; i = i + 1) {\n let flatIndex = index * " + this.workPerThread + " + i;\n if(flatIndex < uniforms.size) {\n let resRC = getCoordsFromIndex(flatIndex);\n setOutputAtIndex(flatIndex, A[getIndexFromCoords" + this.outputShape.length + "D(\n " + dtype + "(" + switched + "), uniforms.aShape)]);\n }\n }\n }\n "; + return userCode; + }; + return TransposeProgram; + }()); + function getSwitchedCoords(newDim) { + var rank = newDim.length; + if (rank > 6) { + throw Error("Transpose for rank " + rank + " is not yet supported"); + } + var switchedCoords = new Array(rank); + for (var i = 0; i < newDim.length; i++) { + switchedCoords[newDim[i]] = "resRC." + getCoordsXYZ(i); + } + return switchedCoords.join(); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function transpose(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var perm = attrs.perm; + var webgpuBackend = backend; + var xRank = x.shape.length; + var newShape = new Array(xRank); + for (var i = 0; i < newShape.length; i++) { + newShape[i] = x.shape[perm[i]]; + } + if (backend.shouldExecuteOnCPU([x])) { + var xData = webgpuBackend.tensorMap.get(x.dataId); + var values = xData.values; + var outValues = transposeImplCPU(values, x.shape, x.dtype, perm, newShape); + return backend.makeTensorInfo(newShape, x.dtype, outValues); + } + if (x.shape.length === 2 && tf.util.arraysEqual(perm, [1, 0])) { + var program_1 = new TransposeSharedProgram(x.shape, perm); + return webgpuBackend.runWebGPUProgram(program_1, [x], x.dtype); + } + var program = new TransposeProgram(x.shape, perm); + return webgpuBackend.runWebGPUProgram(program, [x], x.dtype); + } + var transposeConfig = { + kernelName: tf.Transpose, + backendName: 'webgpu', + kernelFunc: transpose + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function argMax(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis; + var axes = tf.util.parseAxisParam(axis, x.shape); + var permutedAxes = tf.backend_util.getAxesPermutation(axes, x.shape.length); + var $x = x; + var intermediateTensorInfos = []; + if (permutedAxes != null) { + $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } }); + intermediateTensorInfos.push($x); + axes = tf.backend_util.getInnerMostAxes(axes.length, $x.shape.length); + } + tf.backend_util.assertAxesAreInnerMostDims('argMax', [axes[0]], $x.shape.length); + var program = new ArgMinMaxProgram($x.shape, axes[0], 'max'); + var uniformData = [{ type: 'float32', data: [Number.NEGATIVE_INFINITY] }]; + var out = backend.runWebGPUProgram(program, [$x], 'int32', uniformData); + intermediateTensorInfos.forEach(function (t) { return backend.disposeData(t.dataId); }); + return out; + } + var argMaxConfig = { + kernelName: tf.ArgMax, + backendName: 'webgpu', + kernelFunc: argMax + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function argMin(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis; + var axes = tf.util.parseAxisParam(axis, x.shape); + var permutedAxes = tf.backend_util.getAxesPermutation(axes, x.shape.length); + var $x = x; + var intermediateTensorInfos = []; + if (permutedAxes != null) { + $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } }); + intermediateTensorInfos.push($x); + axes = tf.backend_util.getInnerMostAxes(axes.length, $x.shape.length); + } + tf.backend_util.assertAxesAreInnerMostDims('argMin', [axes[0]], $x.shape.length); + var program = new ArgMinMaxProgram($x.shape, axes[0], 'min'); + var uniformData = [{ type: 'float32', data: [Number.POSITIVE_INFINITY] }]; + var out = backend.runWebGPUProgram(program, [$x], 'int32', uniformData); + intermediateTensorInfos.forEach(function (t) { return backend.disposeData(t.dataId); }); + return out; + } + var argMinConfig = { + kernelName: tf.ArgMin, + backendName: 'webgpu', + kernelFunc: argMin + }; + + /** + * @license + * Copyright 2019 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var Pool2DProgram = /** @class */ (function () { + function Pool2DProgram(convInfo, poolType) { + this.variableNames = ['x']; + this.uniforms = "stride : vec2, pad : vec2, dilation : vec2, convDims : vec2, filterDims : vec2,"; + // TODO(jiajia.qin@intel.com): Dynamically choose different workGroupSize for + // different output shapes. + this.workGroupSize = [128, 1, 1]; + this.size = true; + this.outputShape = convInfo.outShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "pool2D_" + poolType; + this.poolType = poolType; + } + Pool2DProgram.prototype.getUserCode = function () { + var updateSnippet = "resultValue = max(value, resultValue);"; + if (this.poolType === 'avg') { + updateSnippet = "resultValue = resultValue + value; count = count + 1.0;"; + } + var returnValue = "resultValue"; + if (this.poolType === 'avg') { + returnValue = "resultValue / count"; + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let batch = coords[0];\n let xRCCorner = vec2(coords.yz) * uniforms.stride - uniforms.pad;\n let xRCorner = xRCCorner.x;\n let xCCorner = xRCCorner.y;\n\n var resultValue = " + (this.poolType === 'avg' ? '0.0' : '-1.0 / pow(10.0, -20.0)') + ";\n var count = 0.0;\n\n for (var wR = 0; wR < uniforms.filterDims.x; wR = wR + uniforms.dilation.x) {\n let xR = xRCorner + wR;\n\n if (xR < 0 || xR >= uniforms.convDims.x) {\n continue;\n }\n\n for (var wC = 0; wC < uniforms.filterDims.y; wC = wC + uniforms.dilation.y) {\n let xC = xCCorner + wC;\n if (xC < 0 || xC >= uniforms.convDims.y) {\n continue;\n }\n\n let value = getX(batch, xR, xC, coords[3]);\n " + updateSnippet + "\n }\n }\n\n setOutputAtIndex(index, " + returnValue + ");\n }\n }\n "; + return userCode; + }; + return Pool2DProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var PoolWithFilterSizeEqualsOneProgram = /** @class */ (function () { + function PoolWithFilterSizeEqualsOneProgram(convInfo) { + this.variableNames = ['x']; + this.uniforms = "stride : vec2,"; + this.workGroupSize = [256, 1, 1]; + this.size = true; + this.outputShape = convInfo.outShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = 'poolWithFilterSizeEqualsOne'; + } + PoolWithFilterSizeEqualsOneProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let batch = coords[0];\n let d = coords[3];\n\n let xRCCorner = coords.yz * uniforms.stride;\n let xRCorner = xRCCorner.x;\n let xCCorner = xRCCorner.y;\n\n let value = getX(batch, xRCorner, xCCorner, d);\n setOutputAtIndex(index, value);\n }\n }\n "; + return userCode; + }; + return PoolWithFilterSizeEqualsOneProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function avgPool(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode; + var dilations = 1; + var convInfo = tf.backend_util.computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && + tf.util.arraysEqual(convInfo.inShape, convInfo.outShape)) { + return identity({ inputs: { x: x }, backend: backend }); + } + var program; + var dimensions = [{ type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }]; + if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1) { + program = new PoolWithFilterSizeEqualsOneProgram(convInfo); + } + else { + program = new Pool2DProgram(convInfo, 'avg'); + dimensions.push({ type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left] }, { + type: 'int32', + data: [convInfo.dilationHeight, convInfo.dilationWidth] + }, { type: 'int32', data: [convInfo.inHeight, convInfo.inWidth] }, { + type: 'int32', + data: [convInfo.effectiveFilterHeight, convInfo.effectiveFilterWidth] + }); + } + return backend.runWebGPUProgram(program, [x], x.dtype, dimensions); + } + var avgPoolConfig = { + kernelName: tf.AvgPool, + backendName: 'webgpu', + kernelFunc: avgPool + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function batchMatMul(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var a = inputs.a, b = inputs.b; + var transposeA = attrs.transposeA, transposeB = attrs.transposeB; + return batchMatMulImpl({ a: a, b: b, transposeA: transposeA, transposeB: transposeB, backend: backend }); + } + var batchMatMulConfig = { + kernelName: tf.BatchMatMul, + backendName: 'webgpu', + kernelFunc: batchMatMul, + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var SliceProgram = /** @class */ (function () { + function SliceProgram(start, destSize) { + this.variableNames = ['source']; + this.workPerThread = 1; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = destSize; + this.rank = destSize.length; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.start = start; + this.uniforms = "start : " + getCoordsDataType(start.length) + ", "; + this.shaderKey = 'slice'; + } + SliceProgram.prototype.getUserCode = function () { + var dtype = getCoordsDataType(this.rank); + var sourceCoords = getCoords$1(this.rank); + var coordSum; + if (this.start.length === 1) { + coordSum = this.outputShape.map(function (_, i) { + return "sourceLoc = uniforms.start + coords;"; + }); + } + else { + coordSum = this.outputShape.map(function (_, i) { + return "sourceLoc." + coords[i] + " = uniforms.start[" + i + "] + coords." + coords[i] + ";"; + }); + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n var sourceLoc : " + dtype + ";\n let coords = getCoordsFromIndex(index);\n " + coordSum.join('\n') + "\n setOutputAtIndex(index, getSource(" + sourceCoords + "));\n }\n }\n "; + return userCode; + }; + return SliceProgram; + }()); + var coords = ['x', 'y', 'z', 'w', 'u', 'v']; + function getCoords$1(rank) { + if (rank === 1) { + return 'sourceLoc'; + } + else if (rank <= 6) { + return coords.slice(0, rank).map(function (coord) { return "sourceLoc." 
+ coord; }).join(','); + } + else { + throw Error("Slicing for rank " + rank + " is not yet supported"); + } + } + + function slice(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var begin = attrs.begin, size = attrs.size; + var _a = __read(tf.slice_util.parseSliceParams(x, begin, size), 2), $begin = _a[0], $size = _a[1]; + tf.slice_util.assertParamsValid(x, $begin, $size); + if (backend.shouldExecuteOnCPU([x]) || x.dtype === 'string') { + var xBufferInfo = backend.tensorMap.get(x.dataId); + var outValues = sliceImplCPU(xBufferInfo.values, $begin, $size, x.shape, x.dtype); + return backend.makeTensorInfo($size, x.dtype, outValues); + } + if (tf.util.sizeFromShape($size) === 0) { + return backend.makeTensorInfo($size, x.dtype, []); + } + // TODO(xing.xu): Add shadow slice support. + var program = new SliceProgram($begin, $size); + var uniformData = [{ type: 'int32', data: $begin }]; + return backend.runWebGPUProgram(program, [x], x.dtype, uniformData); + } + var sliceConfig = { + kernelName: tf.Slice, + backendName: 'webgpu', + kernelFunc: slice + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var batchToSpaceND = function (args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var blockShape = attrs.blockShape, crops = attrs.crops; + tf.util.assert(x.shape.length <= 4, function () { return 'batchToSpaceND for rank > 4 with a WebGPU backend not ' + + 'implemented yet'; }); + var prod = blockShape.reduce(function (a, b) { return a * b; }); + var reshaped = tf.backend_util.getReshaped(x.shape, blockShape, prod); + var permuted = tf.backend_util.getPermuted(reshaped.length, blockShape.length); + var reshapedPermuted = tf.backend_util.getReshapedPermuted(x.shape, blockShape, prod); + var sliceBeginCoords = tf.backend_util.getSliceBeginCoords(crops, blockShape.length); + var sliceSize = tf.backend_util.getSliceSize(reshapedPermuted, crops, blockShape.length); + var toDispose = []; + var reshapedIntermediate = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: reshaped } }); + var transposedIntermediate = transpose({ inputs: { x: reshapedIntermediate }, backend: backend, attrs: { perm: permuted } }); + var reshapedIntermediate2 = reshape({ + inputs: { x: transposedIntermediate }, + backend: backend, + attrs: { shape: reshapedPermuted } + }); + var sliced = slice({ + inputs: { x: reshapedIntermediate2 }, + backend: backend, + attrs: { begin: sliceBeginCoords, size: sliceSize } + }); + toDispose.push(reshapedIntermediate); + toDispose.push(transposedIntermediate); + toDispose.push(reshapedIntermediate2); + toDispose.forEach(function (t) { return backend.disposeData(t.dataId); }); + return sliced; + }; + var batchToSpaceNDConfig = { + kernelName: tf.BatchToSpaceND, + backendName: 'webgpu', + kernelFunc: batchToSpaceND + 
}; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var notEqual = binaryKernelFunc({ + opSnippet: BinaryOpType.NOT_EQUAL, + dtype: 'bool', + cpuKernelImpl: notEqualImplCPU + }); + var notEqualConfig = { + kernelName: tf.NotEqual, + backendName: 'webgpu', + kernelFunc: notEqual + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function real(args) { + var inputs = args.inputs, backend = args.backend; + var input = inputs.input; + var inputData = backend.tensorMap.get(input.dataId); + return identity({ inputs: { x: inputData.complexTensorInfos.real }, backend: backend }); + } + var realConfig = { + kernelName: tf.Real, + backendName: 'webgpu', + kernelFunc: real + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function int(input, backend) { + var program = new UnaryOpProgram(input.shape, UnaryOpType.TO_INT); + var output = backend.runWebGPUProgram(program, [input], 'int32'); + return { dataId: output.dataId, shape: output.shape, dtype: output.dtype }; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cast(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var dtype = attrs.dtype; + // Casting to complex64. + if (dtype === 'complex64') { + if (x.dtype === 'complex64') { + return identity({ inputs: { x: x }, backend: backend }); + } + // TODO: Import kernel function once zeros is modularized. + var zerosTensor = tf__namespace.zeros(x.shape); + var floatX = cast({ inputs: { x: x }, backend: backend, attrs: { dtype: 'float32' } }); + var result = complex({ inputs: { real: floatX, imag: zerosTensor }, backend: backend }); + zerosTensor.dispose(); + backend.disposeData(floatX.dataId); + return result; + } + // Casting from complex64 + if (x.dtype === 'complex64') { + var realPart = real({ inputs: { input: x }, backend: backend }); + var result = cast({ inputs: { x: realPart }, backend: backend, attrs: { dtype: dtype } }); + backend.disposeData(realPart.dataId); + return result; + } + if (!tf.util.hasEncodingLoss(x.dtype, dtype)) { + // We don't change the underlying data, since we cast to higher + // precision. + var result = identity({ inputs: { x: x }, backend: backend }); + return { dataId: result.dataId, shape: result.shape, dtype: dtype }; + } + if (dtype === 'int32') { + return int(x, backend); + } + if (dtype === 'bool') { + var zerosTensorInfo = backend.makeTensorInfo([], 'bool', tf.util.getTypedArrayFromDType('bool', 1)); + var binaryInputs = { a: x, b: zerosTensorInfo }; + var result = notEqual({ inputs: binaryInputs, backend: backend }); + backend.disposeData(zerosTensorInfo.dataId); + return result; + } + throw new Error("Error in Cast: failed to cast " + x.dtype + " to " + dtype); + } + var castConfig = { + kernelName: tf.Cast, + backendName: 'webgpu', + kernelFunc: cast + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ceil = unaryKernelFunc({ opType: UnaryOpType.CEIL, cpuKernelImpl: ceilImplCPU }); + var ceilConfig = { + kernelName: tf.Ceil, + backendName: 'webgpu', + kernelFunc: ceil + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var ClipVec4Program = /** @class */ (function () { + function ClipVec4Program(outputShape) { + this.variableNames = ['A']; + this.uniforms = 'minVal : f32, maxVal : f32,'; + this.workPerThread = 4; + this.workGroupSize = [64, 1, 1]; + this.isVec4 = true; + this.size = true; + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.shaderKey = 'clipVec4'; + } + ClipVec4Program.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if(index < uniforms.size) {\n let value = getAByOutputIndex(index);\n var clampedValue : vec4;\n for (var i = 0; i < 4; i = i + 1) {\n if (isnan(value[i])) {\n clampedValue[i] = value[i];\n } else {\n clampedValue[i] = clamp(value[i], uniforms.minVal, uniforms.maxVal);\n }\n }\n\n setOutputAtIndex(index, clampedValue);\n }\n }\n "; + return userCode; + }; + return ClipVec4Program; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ClipProgram = /** @class */ (function () { + function ClipProgram(outputShape) { + this.variableNames = ['A']; + this.uniforms = 'minVal : f32, maxVal : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = 'clip'; + } + ClipProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if(index < uniforms.size) {\n let value = getAByOutputIndex(index);\n if (isnan(value)) {\n setOutputAtIndex(index, value);\n return;\n }\n setOutputAtIndex(index, clamp(value, uniforms.minVal, uniforms.maxVal));\n }\n }\n "; + return userCode; + }; + return ClipProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function clipByValue(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var clipValueMin = attrs.clipValueMin, clipValueMax = attrs.clipValueMax; + var program; + var uniformData = [ + { type: 'float32', data: [clipValueMin] }, + { type: 'float32', data: [clipValueMax] } + ]; + if (tf.util.sizeFromShape(x.shape) % 4 === 0) { + program = new ClipVec4Program(x.shape); + } + else { + program = new ClipProgram(x.shape); + } + return backend.runWebGPUProgram(program, [x], x.dtype, uniformData); + } + var clipByValueConfig = { + kernelName: tf.ClipByValue, + backendName: 'webgpu', + kernelFunc: clipByValue + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ConcatProgram = /** @class */ (function () { + function ConcatProgram(shapes) { + this.uniforms = ''; + this.workPerThread = 4; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = + tf.backend_util.computeOutShape(shapes, 1 /* axis */); + this.variableNames = shapes.map(function (_, i) { return "T" + i; }); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.offsetLength = shapes.length - 1; + for (var i = 0; i < this.offsetLength; i++) { + this.uniforms += "offset" + i + " : i32,"; + } + this.shaderKey = 'concat'; + } + ConcatProgram.prototype.getUserCode = function () { + var snippets = []; + if (this.offsetLength > 0) { + snippets.push("if (yC < uniforms.offset0){ setOutputAtCoords(coords.x, coords.y, getT0(yR, yC)); }"); + for (var i = 1; i < this.offsetLength; i++) { + snippets.push("else if (yC < uniforms.offset" + [i] + "){ " + + ("setOutputAtCoords(coords.x, coords.y, getT" + i + "(yR, yC - uniforms.offset" + (i - 1) + ")); }")); + } + var lastIndex = this.offsetLength; + var lastShiftIndex = this.offsetLength - 1; + snippets.push("else { setOutputAtCoords(coords.x, coords.y, getT" + lastIndex + "(yR, yC - uniforms.offset" + lastShiftIndex + ")); }"); + } + else { + snippets.push("setOutputAtCoords(coords.x, coords.y, getT0(yR, yC));"); + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n for(var i = 0; i < " + this.workPerThread + "; i = i + 1) {\n let flatIndex = index * " + this.workPerThread + " + i;\n if(flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndex);\n let yR = coords.x;\n let yC = coords.y;\n\n " + snippets.join('\n ') + "\n }\n }\n }\n "; + return userCode; + }; + return ConcatProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function imag(args) { + var inputs = args.inputs, backend = args.backend; + var input = inputs.input; + var inputData = backend.tensorMap.get(input.dataId); + return identity({ inputs: { x: inputData.complexTensorInfos.imag }, backend: backend }); + } + var imagConfig = { + kernelName: tf.Imag, + backendName: 'webgpu', + kernelFunc: imag + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function concatImpl(inputs, axis, backend) { + var dtype = inputs[0].dtype; + if (dtype === 'complex64') { + var reals = inputs.map(function (t) { return real({ inputs: { input: t }, backend: backend }); }); + var imags = inputs.map(function (t) { return imag({ inputs: { input: t }, backend: backend }); }); + var realConcated = concatImpl(reals, axis, backend); + var imagConcated = concatImpl(imags, axis, backend); + var result = complex({ inputs: { real: realConcated, imag: imagConcated }, backend: backend }); + reals.forEach(function (r) { return backend.disposeData(r.dataId); }); + imags.forEach(function (i) { return backend.disposeData(i.dataId); }); + backend.disposeData(realConcated.dataId); + backend.disposeData(imagConcated.dataId); + return result; + } + var runOnCpu = backend.shouldExecuteOnCPU(inputs); + // Run on cpu if dtype is string. For string, the backend represents it + // as Uint8Array[], where each Uint8Array is a character. Given that the + // computation is only on the outer array, uploading the whole data onto + // gpu is wasteful. Also, currently webgpu doesn't have a design to + // upload and retrieve Uint8Array[] between cpu and gpu. Therefore, we + // just run the kernel on cpu if dtype is string. + if (dtype === 'string') { + runOnCpu = true; + } + if (runOnCpu) { + // Any concat of n-dimensional tensors across any axis can be reduced to + // a concatenation of two-dimensional tensors across the axis 1 by first + // partitioning the axes of the original tensors into those less than the + // axis to be concatenated and the rest. Then reshape the tensors + // into a two-dimensional tensor by collapsing these two sets of axes and + // concatenate the resulting matrices across the axis 1, finally reshaping + // the result to have the proper shape. 
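+ // For example, concatenating tensors of shapes [2, 3, 4] and [2, 3, 5]
+ // along axis = 2 reshapes them to [6, 4] and [6, 5] (collapsing the axes
+ // before the concat axis into the first dimension), concatenates the two
+ // matrices along axis 1 into [6, 9], and reshapes that result back to the
+ // final output shape [2, 3, 9].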
+ var tensors2D_1 = inputs.map(function (t) { + var innerSize = tf.util.sizeFromShape(t.shape.slice(axis)); + var shape = [-1, innerSize]; + return reshape({ inputs: { x: t }, backend: backend, attrs: { shape: shape } }); + }); + var inputsValShapes = tensors2D_1.map(function (t) { + return { vals: backend.readSync(t.dataId), shape: t.shape }; + }); + // Concats 2d tensors along axis=1. + var outShape_1 = tf.backend_util.computeOutShape(tensors2D_1.map(function (t) { return t.shape; }), 1 /* axis */); + var simplyConcat = tensors2D_1[0].shape[0] === 1; + var outVals = concatImplCPU(inputsValShapes, outShape_1, dtype, simplyConcat); + var finalOutShape = tf.backend_util.computeOutShape(inputs.map(function (t) { return t.shape; }), axis); + var outInfo = backend.makeTensorInfo(finalOutShape, dtype, outVals); + tensors2D_1.forEach(function (t) { return backend.disposeData(t.dataId); }); + return outInfo; + } + var _a = computeTensors2D(inputs, axis, backend), tensors2D = _a.tensors2D, outShape = _a.outShape; + var shapes = (tensors2D).map(function (t) { return t.shape; }); + var program = new ConcatProgram(shapes); + var uniformData = []; + var offsets = new Array(shapes.length - 1); + if (offsets.length > 0) { + offsets[0] = shapes[0][1]; + uniformData.push({ type: 'int32', data: [offsets[0]] }); + for (var i = 1; i < offsets.length; i++) { + offsets[i] = offsets[i - 1] + shapes[i][1]; + uniformData.push({ type: 'int32', data: [offsets[i]] }); + } + } + var res = backend.runWebGPUProgram(program, tensors2D, tensors2D[0].dtype, uniformData); + tensors2D.forEach(function (r) { return backend.disposeData(r.dataId); }); + var reshapedResult = reshape({ inputs: { x: res }, backend: backend, attrs: { shape: outShape } }); + backend.disposeData(res.dataId); + return reshapedResult; + } + function computeTensors2D(inputs, axis, backend) { + var outShape = tf.backend_util.computeOutShape(inputs.map(function (t) { return t.shape; }), axis); + var tensors2D = inputs.map(function (t) { return reshape({ + inputs: { x: t }, + backend: backend, + attrs: { + shape: [ + tf.util.sizeFromShape(t.shape.slice(0, axis)), + tf.util.sizeFromShape(t.shape.slice(axis)) + ] + } + }); }); + return { tensors2D: tensors2D, outShape: outShape }; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function concat(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var axis = attrs.axis; + var $axis = tf.util.parseAxisParam(axis, inputs[0].shape)[0]; + var outShape = tf.backend_util.computeOutShape(inputs.map(function (t) { return t.shape; }), $axis); + if (tf.util.sizeFromShape(outShape) === 0) { + return backend.makeTensorInfo(outShape, inputs[0].dtype, []); + } + // Keep only non-empty tensors (ignore tensors with 0 in their shape). 
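+ // For instance, concat of shapes [2, 0, 3] and [2, 4, 3] along axis = 1
+ // produces [2, 4, 3]; the zero-sized input contributes no values, so after
+ // filtering it out a single tensor remains and can be returned via identity.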
+ var $inputs = inputs.filter(function (t) { return tf.util.sizeFromShape(t.shape) > 0; }); + if ($inputs.length === 1) { + return identity({ inputs: { x: $inputs[0] }, backend: backend }); + } + var shapes = $inputs.map(function (t) { return t.shape; }); + tf.backend_util.assertParamsConsistent(shapes, $axis); + return concatImpl($inputs, $axis, backend); + } + var concatConfig = { + kernelName: tf.Concat, + backendName: 'webgpu', + kernelFunc: concat + }; + + var Conv2DMMVec4Program = /** @class */ (function () { + function Conv2DMMVec4Program(convInfo, addBias, activation, hasPreluActivationWeights, hasLeakyreluAlpha) { + var _a; + if (addBias === void 0) { addBias = false; } + if (activation === void 0) { activation = null; } + if (hasPreluActivationWeights === void 0) { hasPreluActivationWeights = false; } + if (hasLeakyreluAlpha === void 0) { hasLeakyreluAlpha = false; } + this.variableNames = ['x', 'W']; + this.uniforms = "filterDims : vec2, pad : vec2, stride : vec2, dilation : vec2,\n dimAOuter : i32, dimBOuter : i32, dimInner : i32,"; + this.workGroupSize = [8, 8, 1]; + this.isVec4 = true; + this.outputShape = convInfo.outShape; + tf.util.assert(convInfo.dataFormat === 'channelsLast', function () { return 'TODO: NCHW is unimplemented'; }); + this.dispatchLayout = { x: [3], y: [1, 2], z: [0] }; + // The first element in elementsPerThread must be 4. + if (this.outputShape[1] === 1) { + this.elementsPerThread = [4, 1, 1]; + } + else { + this.elementsPerThread = [4, 4, 1]; + } + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, this.elementsPerThread); + this.convInfo = convInfo; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + this.hasLeakyreluAlpha = hasLeakyreluAlpha; + if (this.addBias) { + this.variableNames.push('bias'); + } + if (this.hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + if (this.hasLeakyreluAlpha) { + this.variableNames.push('leakyreluAlpha'); + } + this.tileAOuter = this.outputShape[1] === 1 ? + 1 : + this.workGroupSize[1] * this.elementsPerThread[1]; + this.tileBOuter = this.workGroupSize[0] * this.elementsPerThread[0]; + this.tileInner = this.tileBOuter; + _a = __read(this.getShapeFit(), 2), this.fitA = _a[0], this.fitB = _a[1]; + this.shaderKey = "conv2DMMVec4_" + this.activation + "_" + this.fitA + "_" + this.fitB + "_" + this.elementsPerThread; + } + Conv2DMMVec4Program.prototype.getShapeFit = function () { + var tileSizeA = [this.tileAOuter, this.tileInner]; + var tileSizeB = [this.tileInner, this.tileBOuter]; + var dimAOuter = this.outputShape[1] * this.outputShape[2]; + var dimBOuter = this.outputShape[3]; + var dimInner = this.convInfo.filterHeight * this.convInfo.filterWidth * + this.convInfo.inChannels; + return [ + tilesFitEvenlyIntoShape(tileSizeA, [dimAOuter, dimInner]), + tilesFitEvenlyIntoShape(tileSizeB, [dimInner, dimBOuter]) + ]; + }; + // index is used to avoid repeated definition error. 
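+ // When inChannels % 4 != 0, a vec4 read of 'x' can straddle two packed
+ // vec4 elements. The sampler below loads x[flatIndex / 4] and, when
+ // flatIndex % 4 != 0, also x[flatIndex / 4 + 1], then swizzles the two
+ // loads into a single contiguous vec4 that starts at flatIndex.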
+ Conv2DMMVec4Program.prototype.getSampleAWithRemainder = function (index) { + return "let flatIndex" + index + " = getIndexFromCoords4D(coord, uniforms.xShape);\n let divBy4Remainder" + index + " = flatIndex" + index + " % 4;\n let divBy4Index" + index + " = flatIndex" + index + " / 4;\n let curData" + index + " = x[divBy4Index" + index + "];\n if (divBy4Remainder" + index + " == 0) {\n temp = curData" + index + ";\n } else {\n // TODO: This could end up being a redundant load with another one in\n // the same shader invocation. Perhaps there's an opportunity for\n // optimization\n let nextData" + index + " = x[divBy4Index" + index + " + 1];\n if (divBy4Remainder" + index + " == 1) {\n temp = vec4(curData" + index + ".yzw, nextData" + index + ".x);\n } else if (divBy4Remainder" + index + " == 2) {\n temp = vec4(curData" + index + ".zw, nextData" + index + ".xy);\n } else if (divBy4Remainder" + index + " == 3) {\n temp = vec4(curData" + index + ".w, nextData" + index + ".xyz);\n }\n }\n "; + }; + Conv2DMMVec4Program.prototype.getUserCode = function () { + var matMulSource = makeMatMulPackedVec4Source(this.elementsPerThread, this.tileAOuter, this.tileBOuter, this.tileInner); + var remainder = this.convInfo.inChannels % 4; + // Below code only applys to valid padding type. + var remainderSnippet = remainder === 0 ? + "// The bounds checking is always needed since we use it to pad zero for\n // the 'same' padding type.\n if (coordsInBounds4D(coord, uniforms.xShape)) {\n resData = x[getIndexFromCoords4D(coord, uniforms.xShape) / 4];\n } else {\n resData = vec4(0.0); }" : + "var temp = vec4(0.0);\n " + this.getSampleAWithRemainder(1) + "\n resData = temp;\n if (WCol == (uniforms.filterDims[1] - 1)) {\n coord = vec4(\n coord.x, coord.y + 1, coord.z + 1 - uniforms.filterDims[1], 0);\n " + this.getSampleAWithRemainder(2) + "\n if (inChCoord == 0) {\n resData = vec4(resData.xyz, temp.x);\n } else if (inChCoord == 1) {\n resData = vec4(resData.xy, temp.xy);\n } else {\n resData = vec4(resData.x, temp.xyz);\n }\n }\n "; + var readASnippet = "let outRow = r / uniforms.outShape[2];\n let outCol = r % uniforms.outShape[2];\n let WRow = c / (uniforms.filterDims[1] * uniforms.xShape[3]);\n let WCol = c / uniforms.xShape[3] % uniforms.filterDims[1];\n let inChCoord = c % uniforms.xShape[3];\n var coord = vec4(\n batch,\n outRow * uniforms.stride[0] + uniforms.dilation[0] * WRow - uniforms.pad[0],\n outCol * uniforms.stride[1] + uniforms.dilation[1] * WCol - uniforms.pad[1],\n inChCoord);\n var resData = vec4(0.0);\n " + remainderSnippet + "\n return resData;"; + var sampleA = this.fitA ? + "" + readASnippet : + "if (r < uniforms.dimAOuter && c < uniforms.dimInner) {\n " + readASnippet + "\n }\n return vec4(0.0);\n "; + var sampleB = this.fitB ? 
+ "return W[row * uniforms.dimBOuter / 4 + col];" : + "if(coordsInBounds2D(vec2(row, col * 4), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return W[row * uniforms.dimBOuter / 4 + col];\n }\n return vec4(0.0);\n "; + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, this.isVec4); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a : vec4, outCoord : vec4) -> vec4 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else if (this.hasLeakyreluAlpha) { + activationSnippet = "fn activation(outCoord: vec4) -> vec4 {\n let b = getLeakyreluAlphaByOutputCoords(outCoord);\n " + activationOp + "\n }"; + throw new Error('Leakyrelu is not supported.'); + } + else { + activationSnippet = "\n fn activation(a : vec4, outCoord : vec4) -> vec4 {\n " + activationOp + "\n }"; + } + applyActivationSnippet = "value = activation(value, outCoord);"; + } + var addBiasSnippet = this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> vec4 {\n let r = row;\n let c = col * 4;\n var batch = i32(globalId.z);\n " + sampleA + "\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> vec4 {\n " + sampleB + "\n }\n\n fn mm_write(row : i32, col : i32, valueInput : vec4, globalId : vec3) {\n var batch = i32(globalId.z);\n var value = valueInput;\n if (row < uniforms.dimAOuter && col * 4 < uniforms.dimBOuter)\n {\n let outCoord = vec4(\n batch,\n row / uniforms.outShape[2],\n row % uniforms.outShape[2],\n col * 4);\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(outCoord[0], outCoord[1], outCoord[2], outCoord[3],\n value);\n }\n }\n " + matMulSource + "\n "; + return userCode; + }; + return Conv2DMMVec4Program; + }()); + + var Conv2DMMProgram = /** @class */ (function () { + function Conv2DMMProgram(convInfo, addBias, activation, hasPreluActivationWeights) { + var _a; + if (addBias === void 0) { addBias = false; } + if (activation === void 0) { activation = null; } + if (hasPreluActivationWeights === void 0) { hasPreluActivationWeights = false; } + this.variableNames = ['x', 'W']; + this.uniforms = "filterDims : vec2, pad : vec2, stride : vec2, dilation : vec2, dimAOuter : i32, dimBOuter : i32, dimInner : i32,"; + this.outputShape = convInfo.outShape; + this.isChannelsLast = convInfo.dataFormat === 'channelsLast'; + this.dispatchLayout = this.isChannelsLast ? 
{ x: [3], y: [1, 2], z: [0] } : + { x: [1], y: [2, 3], z: [0] }; + this.workGroupSize = + computeWorkGroupSizeForConv2d(this.dispatchLayout, this.outputShape); + this.elementsPerThread = + computeWorkPerThreadForConv2d(this.dispatchLayout, this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, this.elementsPerThread); + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + this.convInfo = convInfo; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + _a = __read(this.getShapeFit(), 2), this.fitA = _a[0], this.fitB = _a[1]; + this.shaderKey = "conv2DMM_" + this.elementsPerThread + "_" + this.activation + "_" + this.fitA + "_" + this.fitB + "_" + this.isChannelsLast; + } + Conv2DMMProgram.prototype.getShapeFit = function () { + var tileAOuter = this.workGroupSize[1] * this.elementsPerThread[1]; + var tileBOuter = this.workGroupSize[0] * this.elementsPerThread[0]; + var tileInner = tileAOuter > tileBOuter ? tileAOuter : tileBOuter; + tf.util.assert(tileInner % this.workGroupSize[0] === 0 && + tileInner % this.workGroupSize[1] === 0, function () { + // tslint:disable-next-line: max-line-length + return 'tileInner must be multiple of workgroupsize.x and workgroupsize.y'; + }); + var tileSizeA = [tileAOuter, tileInner]; + var tileSizeB = [tileInner, tileBOuter]; + var dimAOuter = this.convInfo.outHeight * this.convInfo.outWidth; + var dimBOuter = this.convInfo.outChannels; + var dimInner = this.convInfo.filterHeight * this.convInfo.filterWidth * + this.convInfo.inChannels; + return [ + tilesFitEvenlyIntoShape(tileSizeA, [dimAOuter, dimInner]), + tilesFitEvenlyIntoShape(tileSizeB, [dimInner, dimBOuter]) + ]; + }; + Conv2DMMProgram.prototype.getUserCode = function () { + var coordASnippet = this.isChannelsLast ? "\n let coord = vec4(batch, xRow, xCol, col % inChannels);\n " : + "\n let coord = vec4(batch, col % inChannels, xRow, xCol);\n "; + var coordResSnippet = this.isChannelsLast ? "\n let outCoord = vec4(\n batch,\n row / outWidth,\n row % outWidth,\n col);\n " : + "\n let outCoord = vec4(\n batch,\n col,\n row / outWidth,\n row % outWidth);\n "; + var matMulSource = makeMatMulPackedSource(this.elementsPerThread, this.workGroupSize); + var readASnippet = "\n let inChannels = uniforms.wShape[2];\n let outWidth = " + (this.isChannelsLast ? 'uniforms.outShape[2]' : 'uniforms.outShape[3]') + ";\n let outRow = row / outWidth;\n let outCol = row % outWidth;\n\n let WRow = col / (uniforms.filterDims[1] * inChannels);\n let WCol = col / inChannels % uniforms.filterDims[1];\n let xRow = outRow * uniforms.stride[0] + uniforms.dilation[0] * WRow - uniforms.pad[0];\n let xCol = outCol * uniforms.stride[1] + uniforms.dilation[1] * WCol - uniforms.pad[1];\n " + coordASnippet + "\n // The bounds checking is always needed since we use it to pad zero for the\n // 'same' padding type.\n if(coordsInBounds4D(coord, uniforms.xShape)) {\n return x[getIndexFromCoords4D(coord, uniforms.xShape)];\n }\n return 0.0;"; + var sampleA = this.fitA ? + "" + readASnippet : + "if (row < uniforms.dimAOuter && col < uniforms.dimInner) {\n " + readASnippet + "\n }\n return 0.0;\n "; + var sampleB = this.fitB ? 
+ "return W[row * uniforms.dimBOuter + col];" : + "if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return W[row * uniforms.dimBOuter + col];\n\t }\n\t return 0.0;\n\t "; + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, false); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a: f32, outCoord : vec4) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : f32, outCoord : vec4) -> f32 {\n " + activationOp + "\n }\n "; + } + applyActivationSnippet = "value = activation(value, outCoord);"; + } + var addBiasSnippet = this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n var batch = i32(globalId.z);\n " + sampleA + "\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n " + sampleB + "\n }\n\n fn mm_write(row : i32, col : i32, valueInput : f32, globalId : vec3) {\n var batch = i32(globalId.z);\n var value = valueInput;\n let outWidth = " + (this.isChannelsLast ? 'uniforms.outShape[2]' : 'uniforms.outShape[3]') + ";\n " + coordResSnippet + "\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n result[getIndexFromCoords4D(outCoord, uniforms.outShape)] = value;\n }\n " + matMulSource + "\n "; + return userCode; + }; + return Conv2DMMProgram; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var Conv2DNaiveProgram = /** @class */ (function () { + function Conv2DNaiveProgram(convInfo, addBias, activation, hasPreluActivationWeights) { + if (addBias === void 0) { addBias = false; } + if (activation === void 0) { activation = null; } + if (hasPreluActivationWeights === void 0) { hasPreluActivationWeights = false; } + this.variableNames = ['x', 'W']; + this.uniforms = "filterDims : vec2, pad : vec2, stride : vec2, dilation : vec2,"; + this.workGroupSize = [128, 1, 1]; + this.outputShape = convInfo.outShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + tf.util.assert(convInfo.dataFormat === 'channelsLast', function () { return 'TODO: NCHW is unimplemented'; }); + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + this.convInfo = convInfo; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivationWeights = hasPreluActivationWeights; + this.shaderKey = "conv2DNaive_" + this.activation; + } + Conv2DNaiveProgram.prototype.getUserCode = function () { + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation); + if (this.hasPreluActivationWeights) { + activationSnippet = + "fn activation(a : f32, outCoord : vec4) -> f32{\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : f32, outCoord : vec4) -> f32{\n " + activationOp + "\n }\n "; + } + applyActivationSnippet = "value = activation(value, outCoord);"; + } + var addBiasSnippet = this.addBias ? 
'value = value + getBiasByOutputCoords(outCoord);' : ''; + var userCode = "\n " + activationSnippet + "\n fn readInp(batch : i32, row : i32, col : i32, chan : i32) -> f32 {\n let coord = vec4(batch, row, col, chan);\n if(coordsInBounds4D(coord, uniforms.xShape)) {\n return getX(batch, row, col, chan);\n }\n return 0.0;\n }\n\n fn readFilt(row : i32, col : i32, xChannel : i32, outChannel : i32) -> f32{\n let coord = vec4(row, col, xChannel, outChannel);\n if(coordsInBounds4D(coord, uniforms.wShape)) {\n return getW(row, col, xChannel, outChannel);\n }\n return 0.0;\n }\n\n fn writeResult(batch : i32, row : i32, col : i32, chan : i32, value : f32) {\n let coord = vec4(batch, row, col, chan);\n if (coordsInBounds4D(coord, uniforms.outShape)) {\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(batch, row, col, chan, value);\n }\n }\n\n " + getMainHeaderString() + "\n let coords = getOutputCoords();\n let batch = coords[0];\n let outChannel = coords[3];\n\n var acc = 0.0;\n\n for (var row = 0; row < uniforms.filterDims[0]; row = row + 1) {\n for (var col = 0; col < uniforms.filterDims[1]; col = col + 1) {\n for (var xChannel = 0; xChannel < uniforms.xShape[3]; xChannel = xChannel + 1) {\n let coordRow = coords[1] * uniforms.stride[0] + uniforms.dilation[0] * row - uniforms.pad[0];\n let coordCol = coords[2] * uniforms.stride[1] + uniforms.dilation[1] * col - uniforms.pad[1];\n let v = readInp(batch, coordRow, coordCol, xChannel);\n let f = readFilt(row, col, xChannel, outChannel);\n acc = acc + v * f;\n }\n }\n }\n\n writeResult(batch, coords[1], coords[2], outChannel, acc);\n }\n "; + return userCode; + }; + return Conv2DNaiveProgram; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var Im2ColProgram = /** @class */ (function () { + function Im2ColProgram(outputShape, isChannelsLast) { + this.variableNames = ['A']; + this.uniforms = "pad : vec2, stride : vec2, dilation : vec2, outWidth : i32, itemsPerBlockRow : i32,\n inChannels : i32,"; + this.workPerThread = 4; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + this.isChannelsLast = isChannelsLast; + this.shaderKey = "im2col_" + this.isChannelsLast; + } + Im2ColProgram.prototype.getUserCode = function () { + var rowDim = this.isChannelsLast ? 0 : 1; + var colDim = this.isChannelsLast ? 
1 : 2; + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n\n for(var i = 0; i<" + this.workPerThread + "; i = i + 1) {\n let flatIndex = index * " + this.workPerThread + " + i;\n\n let rc = getCoordsFromIndex(flatIndex);\n\n if(flatIndex < uniforms.size) {\n let blockIndex = rc[0];\n let pos = rc[1];\n\n let offsetY = blockIndex / uniforms.outWidth * uniforms.stride[1] - uniforms.pad[1];\n let d0 = offsetY + uniforms.dilation[1] * pos / uniforms.itemsPerBlockRow;\n var value = 0.0;\n if(d0 < uniforms.aShape[" + rowDim + "] && d0 >= 0) {\n let offsetX = (blockIndex % uniforms.outWidth) * uniforms.stride[0] -\n uniforms.pad[0];\n let d1 = offsetX + uniforms.dilation[0] * ((pos %\n uniforms.itemsPerBlockRow) / uniforms.inChannels);\n let ch = pos % uniforms.inChannels;\n if(d1 < uniforms.aShape[" + colDim + "] && d1 >= 0) {\n value = getA(d0, d1, ch);\n }\n }\n setOutputAtIndex(flatIndex, value);\n }\n }\n }\n "; + return userCode; + }; + return Im2ColProgram; + }()); + + // For 1x1 kernels that iterate through every point in the input, convolution + // can be expressed as matrix multiplication (without need for memory + // remapping). + function conv2dByMatMul(_a) { + var x = _a.x, filter = _a.filter, convInfo = _a.convInfo, backend = _a.backend, _b = _a.bias, bias = _b === void 0 ? null : _b, _c = _a.preluActivationWeights, preluActivationWeights = _c === void 0 ? null : _c, _d = _a.leakyreluAlpha, leakyreluAlpha = _d === void 0 ? 0 : _d, _e = _a.activation, activation = _e === void 0 ? null : _e; + var isChannelsLast = convInfo.dataFormat === 'channelsLast'; + var transposeA = isChannelsLast ? false : true; + var transposeB = false; + var sameSize = isChannelsLast && + convInfo.filterHeight === convInfo.inHeight && + convInfo.filterWidth === convInfo.inWidth && + convInfo.padInfo.type === 'VALID'; + var xReshaped; + var filterReshaped; + if (sameSize) { + var sharedDim = convInfo.inHeight * convInfo.inWidth * convInfo.inChannels; + xReshaped = reshape({ + inputs: { x: x }, + backend: backend, + attrs: { shape: [1, convInfo.batchSize, sharedDim] } + }); + filterReshaped = reshape({ + inputs: { x: filter }, + backend: backend, + attrs: { shape: [1, sharedDim, convInfo.outChannels] } + }); + } + else { + xReshaped = reshape({ + inputs: { x: x }, + backend: backend, + attrs: { + shape: isChannelsLast ? + [ + convInfo.batchSize, convInfo.inHeight * convInfo.inWidth, + convInfo.inChannels + ] : + [ + convInfo.batchSize, convInfo.inChannels, + convInfo.inHeight * convInfo.inWidth + ] + } + }); + filterReshaped = reshape({ + inputs: { x: filter }, + backend: backend, + attrs: { shape: [1, convInfo.inChannels, convInfo.outChannels] } + }); + } + var result = batchMatMulImpl({ + a: isChannelsLast ? xReshaped : filterReshaped, + b: isChannelsLast ? 
filterReshaped : xReshaped, + transposeA: transposeA, + transposeB: transposeB, + backend: backend, + bias: bias, + activation: activation, + preluActivationWeights: preluActivationWeights, + leakyreluAlpha: leakyreluAlpha + }); + var out = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: convInfo.outShape } }); + backend.disposeData(xReshaped.dataId); + backend.disposeData(filterReshaped.dataId); + backend.disposeData(result.dataId); + return out; + } + // Implements the im2row algorithm as outlined in "High Performance + // Convolutional Neural Networks for Document Processing" (Suvisoft, 2006) + function conv2dWithIm2Col(_a) { + var e_1, _b; + var x = _a.x, filter = _a.filter, convInfo = _a.convInfo, backend = _a.backend, _c = _a.bias, bias = _c === void 0 ? null : _c, _d = _a.preluActivationWeights, preluActivationWeights = _d === void 0 ? null : _d, _e = _a.leakyreluAlpha, leakyreluAlpha = _e === void 0 ? 0 : _e, _f = _a.activation, activation = _f === void 0 ? null : _f; + // Rearranges conv2d input so each block to be convolved over forms the + // column of a new matrix with shape [filterWidth * filterHeight * + // inChannels, outHeight * outWidth]. The filter is also rearranged so each + // output channel forms a row of a new matrix with shape [outChannels, + // filterWidth * filterHeight * inChannels]. The convolution is then + // computed by multiplying these matrices and reshaping the result. + var filterWidth = convInfo.filterWidth, filterHeight = convInfo.filterHeight, inChannels = convInfo.inChannels, strideWidth = convInfo.strideWidth, strideHeight = convInfo.strideHeight, padInfo = convInfo.padInfo, outWidth = convInfo.outWidth, outHeight = convInfo.outHeight, dilationWidth = convInfo.dilationWidth, dilationHeight = convInfo.dilationHeight, dataFormat = convInfo.dataFormat; + var isChannelsLast = dataFormat === 'channelsLast'; + var sharedDim = filterWidth * filterHeight * inChannels; + var numCols = outHeight * outWidth; + var x2ColShape = [numCols, sharedDim]; + var transposeA = false; + var transposeB = false; + var intermediates = []; + var xSqueezed = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: x.shape.slice(1) } }); + var w2Row = reshape({ inputs: { x: filter }, backend: backend, attrs: { shape: [1, sharedDim, -1] } }); + intermediates.push(xSqueezed); + intermediates.push(w2Row); + var im2ColProgram = new Im2ColProgram(x2ColShape, isChannelsLast); + var dimensions = [ + { type: 'int32', data: [padInfo.left, padInfo.top] }, + { type: 'int32', data: [strideWidth, strideHeight] }, + { type: 'int32', data: [dilationWidth, dilationHeight] }, + { type: 'int32', data: [outWidth] }, + { type: 'int32', data: [inChannels * filterWidth] }, + { type: 'int32', data: [inChannels] } + ]; + var im2Col = backend.runWebGPUProgram(im2ColProgram, [xSqueezed], xSqueezed.dtype, dimensions); + var im2Col3D = reshape({ + inputs: { x: im2Col }, + backend: backend, + attrs: { shape: [1, x2ColShape[0], x2ColShape[1]] } + }); + intermediates.push(im2Col); + intermediates.push(im2Col3D); + var a3dShape = [1, x2ColShape[0], x2ColShape[1]]; + var matMulProgram = new MatMulPackedProgram(a3dShape, [1, numCols, convInfo.outChannels], tf.env().get('WEBGPU_MATMUL_WORK_PER_THREAD'), true, true, transposeA, transposeB, bias, activation, preluActivationWeights); + var dimAOuter = a3dShape[1]; + var dimInner = a3dShape[2]; + var dimBOuter = convInfo.outChannels; + var matmulDimensions = [ + { type: 'int32', data: [dimAOuter] }, { type: 'int32', data: [dimBOuter] }, + { 
type: 'int32', data: [dimInner] } + ]; + var inputs = [im2Col3D, w2Row]; + if (bias) { + inputs.push(bias); + } + if (preluActivationWeights) { + inputs.push(preluActivationWeights); + } + if (activation === 'leakyrelu') { + dimensions.push({ type: 'float32', data: [leakyreluAlpha] }); + matMulProgram.uniforms += ' alpha : f32,'; + } + var result = backend.runWebGPUProgram(matMulProgram, inputs, im2Col3D.dtype, matmulDimensions); + var outShape = isChannelsLast ? + [1, outHeight, outWidth, convInfo.outChannels] : + [1, convInfo.outChannels, outHeight, outWidth]; + var out = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: outShape } }); + intermediates.push(result); + try { + for (var intermediates_1 = __values(intermediates), intermediates_1_1 = intermediates_1.next(); !intermediates_1_1.done; intermediates_1_1 = intermediates_1.next()) { + var i = intermediates_1_1.value; + backend.disposeData(i.dataId); + } + } + catch (e_1_1) { e_1 = { error: e_1_1 }; } + finally { + try { + if (intermediates_1_1 && !intermediates_1_1.done && (_b = intermediates_1.return)) _b.call(intermediates_1); + } + finally { if (e_1) throw e_1.error; } + } + return out; + } + function conv2DImpl(_a) { + var x = _a.x, filter = _a.filter, convInfo = _a.convInfo, backend = _a.backend, _b = _a.bias, bias = _b === void 0 ? null : _b, _c = _a.preluActivationWeights, preluActivationWeights = _c === void 0 ? null : _c, _d = _a.leakyreluAlpha, leakyreluAlpha = _d === void 0 ? 0 : _d, _e = _a.activation, activation = _e === void 0 ? null : _e; + var hasBias = bias != null; + var hasPreluActivationWeights = preluActivationWeights != null; + var isChannelsLast = convInfo.dataFormat === 'channelsLast'; + var program; + var sameSize = isChannelsLast && + convInfo.filterHeight === convInfo.inHeight && + convInfo.filterWidth === convInfo.inWidth && + convInfo.padInfo.type === 'VALID'; + if (sameSize || + (convInfo.filterHeight === 1 && convInfo.filterWidth === 1 && + convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 && + convInfo.strideHeight === 1 && convInfo.strideWidth === 1 && + (convInfo.padInfo.type === 'SAME' || + convInfo.padInfo.type === 'VALID'))) { + return conv2dByMatMul({ + x: x, + filter: filter, + convInfo: convInfo, + backend: backend, + bias: bias, + activation: activation, + preluActivationWeights: preluActivationWeights, + leakyreluAlpha: leakyreluAlpha + }); + } + if (tf.env().getBool('WEBGPU_CONV_SEPARATE_IM2COL_SHADER') && x.shape[0] === 1) { + tf.util.assert(isChannelsLast, function () { return 'TODO: NCHW is unimplemented'; }); + return conv2dWithIm2Col({ + x: x, + filter: filter, + convInfo: convInfo, + backend: backend, + bias: bias, + preluActivationWeights: preluActivationWeights, + leakyreluAlpha: leakyreluAlpha, + activation: activation + }); + } + var useNaive = tf.env().getBool('WEBGPU_USE_NAIVE_CONV2D'); + var useVec4 = (convInfo.inChannels % 4 === 0 || + (convInfo.inChannels === 3 && convInfo.padInfo.type === 'VALID')) && + convInfo.outChannels % 4 === 0 && isChannelsLast; + var padInfo = [convInfo.padInfo.top, convInfo.padInfo.left]; + var dimensions = [ + { type: 'int32', data: [convInfo.filterHeight, convInfo.filterWidth] }, + { type: 'int32', data: __spread(padInfo) }, + { type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }, + { type: 'int32', data: [convInfo.dilationHeight, convInfo.dilationWidth] } + ]; + if (useNaive) { + tf.util.assert(isChannelsLast, function () { return 'TODO: NCHW is unimplemented'; }); + // TODO(kainino0x): This may be 
obsolete, but is kept for reference. + program = new Conv2DNaiveProgram(convInfo, hasBias, activation, hasPreluActivationWeights); + } + else { + if (useVec4) { + program = new Conv2DMMVec4Program(convInfo, hasBias, activation, hasPreluActivationWeights); + } + else { + program = new Conv2DMMProgram(convInfo, hasBias, activation, hasPreluActivationWeights); + } + var dimAOuter = convInfo.outHeight * convInfo.outWidth; + var dimBOuter = convInfo.outChannels; + var dimInner = convInfo.filterHeight * convInfo.filterWidth * convInfo.inChannels; + dimensions.push({ type: 'int32', data: [dimAOuter] }, { type: 'int32', data: [dimBOuter] }, { type: 'int32', data: [dimInner] }); + } + var inputVar = [x, filter]; + if (hasBias) { + inputVar.push(bias); + } + if (hasPreluActivationWeights) { + inputVar.push(preluActivationWeights); + } + if (activation === 'leakyrelu') { + dimensions.push({ type: 'float32', data: [leakyreluAlpha] }); + program.uniforms += ' alpha : f32,'; + } + return backend.runWebGPUProgram(program, inputVar, x.dtype, dimensions); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function conv2d(args) { + var inputs = args.inputs, attrs = args.attrs, backend = args.backend; + var x = inputs.x, filter = inputs.filter; + var strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode; + var $dataFormat = tf.backend_util.convertConv2DDataFormat(dataFormat); + var convInfo = tf.backend_util.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + return conv2DImpl({ x: x, filter: filter, convInfo: convInfo, backend: backend }); + } + var conv2DConfig = { + kernelName: tf.Conv2D, + backendName: 'webgpu', + kernelFunc: conv2d + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var Conv2DDerInputMMProgram = /** @class */ (function () { + function Conv2DDerInputMMProgram(convInfo) { + this.variableNames = ['x', 'W']; + this.uniforms = 'filterDims : vec2, pads : vec2, stride : vec2, outBackprop : vec4, dimAOuter : i32, dimBOuter : i32, dimInner : i32,'; + this.outputShape = convInfo.inShape; + tf.util.assert(convInfo.dataFormat === 'channelsLast', function () { return 'TODO: NCHW is unimplemented'; }); + this.dispatchLayout = { x: [3], y: [1, 2], z: [0] }; + this.workGroupSize = + computeWorkGroupSizeForConv2d(this.dispatchLayout, this.outputShape); + this.elementsPerThread = + computeWorkPerThreadForConv2d(this.dispatchLayout, this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, this.elementsPerThread); + this.shaderKey = "conv2DDerInputMM_" + this.elementsPerThread; + } + Conv2DDerInputMMProgram.prototype.getUserCode = function () { + var matMulSource = makeMatMulPackedSource(this.elementsPerThread, this.workGroupSize); + var readASnippet = "\n let outRow = row / uniforms.outShape[2];\n let outCol = row % uniforms.outShape[2];\n\n let WRow = col / (uniforms.filterDims[1] * uniforms.outBackprop[3]);\n let WCol = col / uniforms.outBackprop[3] % uniforms.filterDims[1];\n let xR = f32(outRow - uniforms.pads[0] + WRow) / f32(uniforms.stride[0]);\n let xC = f32(outCol - uniforms.pads[1] + WCol) / f32(uniforms.stride[1]);\n if (xR < 0.0 || xR >= f32(uniforms.outBackprop[1]) || fract(xR) > 0.0) {\n return 0.0;\n }\n if (xC < 0.0 || xC >= f32(uniforms.outBackprop[2]) || fract(xC) > 0.0) {\n return 0.0;\n }\n let coord = vec4(\n batch,\n i32(xR),\n i32(xC),\n col % uniforms.outBackprop[3]);\n return x[getIndexFromCoords4D(coord, uniforms.xShape)];"; + var sampleA = "if (row < uniforms.dimAOuter && col < uniforms.dimInner) {\n " + readASnippet + "\n }\n return 0.0;"; + var userCode = "\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n var batch = i32(globalId.z);\n " + sampleA + "\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n let coordX = uniforms.filterDims.x - 1 -\n row / (uniforms.filterDims[1] * uniforms.outBackprop[3]);\n let coordY = uniforms.filterDims.y - 1 -\n (row / uniforms.outBackprop[3]) % uniforms.filterDims[1];\n if (row < uniforms.dimInner && col < uniforms.dimBOuter &&\n coordX >= 0 && coordY >= 0) {\n let coord = vec4(coordX, coordY, col,\n row % uniforms.outBackprop[3]);\n return W[getIndexFromCoords4D(coord, uniforms.wShape)];\n }\n return 0.0;\n }\n\n fn mm_write(row : i32, col : i32, valueInput : f32, globalId : vec3) {\n var batch = i32(globalId.z);\n var value = valueInput;\n let outCoord = vec4(\n batch,\n row / uniforms.outShape[2],\n row % uniforms.outShape[2],\n col);\n result[getIndexFromCoords4D(outCoord, uniforms.outShape)] = value;\n }\n\n " + matMulSource + "\n "; + return userCode; + }; + return Conv2DDerInputMMProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var Conv2DDerInputProgram = /** @class */ (function () { + function Conv2DDerInputProgram(convInfo) { + this.variableNames = ['dy', 'W']; + this.uniforms = 'filterDims : vec2, pads : vec2, stride : vec2, outBackprop : vec4,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = convInfo.inShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.isChannelsLast = convInfo.dataFormat === 'channelsLast'; + this.shaderKey = "conv2DDerInput_" + this.isChannelsLast; + } + Conv2DDerInputProgram.prototype.getUserCode = function () { + var rowDim = this.isChannelsLast ? 1 : 2; + var colDim = this.isChannelsLast ? 2 : 3; + var channelDim = this.isChannelsLast ? 3 : 1; + return "\n " + getMainHeaderAndGlobalIndexString() + " {\n if(index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let batch = coords[0];\n let d1 = coords[" + channelDim + "];\n\n let dyCorner = vec2(coords[" + rowDim + "]), coords[" + colDim + "]) - uniforms.pads;\n let dyRCorner = dyCorner.x;\n let dyCCorner = dyCorner.y;\n\n // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1).\n // ? = to be determined. : = across all values in that axis.\n var dotProd = 0.0;\n for (var wR = 0; wR < uniforms.filterDims.x; wR = wR + 1) {\n let dyR = (f32(dyRCorner) + f32(wR)) / f32(uniforms.stride.x);\n let wRPerm = uniforms.filterDims.x - 1 - wR;\n if (dyR < 0.0 || dyR >= f32(uniforms.outBackprop[1]) || fract(dyR) > 0.0 ||\n wRPerm < 0) {\n continue;\n }\n let idyR = dyR;\n\n for (var wC = 0; wC < uniforms.filterDims.y; wC = wC + 1) {\n let dyC = (f32(dyCCorner) + f32(wC)) / f32(uniforms.stride.y);\n let wCPerm = uniforms.filterDims.y - 1 - wC;\n if (dyC < 0.0 || dyC >= f32(uniforms.outBackprop[2]) ||\n fract(dyC) > 0.0 || wCPerm < 0) {\n continue;\n }\n let idyC = dyC;\n\n for (var d2 = 0; d2 < uniforms.outBackprop[3]; d2 = d2 + 1) {\n if (" + this.isChannelsLast + ") {\n let xValue = getDy(batch, idyR, idyC, d2);\n let wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd = dotProd + xValue * wValue;\n } else {\n let xValue = getDy(batch, d2, idyR, idyC);\n let wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd = dotProd + xValue * wValue;\n }\n\n }\n }\n }\n setOutputAtIndex(index, dotProd);\n }\n }\n "; + }; + return Conv2DDerInputProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv2DBackpropInput(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var dy = inputs.dy, filter = inputs.filter; + var inputShape = attrs.inputShape, strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dimRoundingMode = attrs.dimRoundingMode; + var $dataFormat = tf.backend_util.convertConv2DDataFormat(dataFormat); + var convInfo = tf.backend_util.computeConv2DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat); + var dimensions = [ + { type: 'int32', data: [convInfo.filterHeight, convInfo.filterWidth] }, + { + type: 'int32', + data: [ + convInfo.filterHeight - 1 - convInfo.padInfo.top, + convInfo.filterWidth - 1 - convInfo.padInfo.left + ] + }, + { type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }, + { + type: 'int32', + data: [ + convInfo.batchSize, convInfo.outHeight, convInfo.outWidth, + convInfo.outChannels + ] + }, + ]; + var program; + if (tf.env().getBool('WEBGPU_USE_NAIVE_CONV2D_TRANSPOSE')) { + // Keep Conv2DDerInputProgram for reference. + program = new Conv2DDerInputProgram(convInfo); + } + else { + program = new Conv2DDerInputMMProgram(convInfo); + var dimAOuter = convInfo.inShape[1] * convInfo.inShape[2]; + var dimBOuter = convInfo.inShape[3]; + var dimInner = convInfo.filterHeight * convInfo.filterWidth * convInfo.outChannels; + dimensions.push({ type: 'uint32', data: [dimAOuter] }, { type: 'uint32', data: [dimBOuter] }, { type: 'uint32', data: [dimInner] }); + } + return backend.runWebGPUProgram(program, [dy, filter], 'float32', dimensions); + } + var conv2DBackpropInputConfig = { + kernelName: tf.Conv2DBackpropInput, + backendName: 'webgpu', + kernelFunc: conv2DBackpropInput, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var cos = unaryKernelFunc({ opType: UnaryOpType.COS }); + var cosConfig = { + kernelName: tf.Cos, + backendName: 'webgpu', + kernelFunc: cos + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var cosh = unaryKernelFunc({ opType: UnaryOpType.COSH }); + var coshConfig = { + kernelName: tf.Cosh, + backendName: 'webgpu', + kernelFunc: cosh + }; + + var CropAndResizeProgram = /** @class */ (function () { + function CropAndResizeProgram(channnel, boxShape, cropSize, method) { + this.variableNames = ['Image', 'Boxes', 'BoxInd']; + this.uniforms = 'extrapolationValue : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + var _a = __read(boxShape, 1), numBoxes = _a[0]; + this.outputShape = [numBoxes, cropSize[0], cropSize[1], channnel]; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.methodId = method === 'bilinear' ? 1 : 0; + this.cropHeightBiggerThan1 = this.outputShape[1] > 1; + this.cropWidthBiggerThan1 = this.outputShape[2] > 1; + this.shaderKey = "cropAndResize_" + this.methodId + "_" + this.cropHeightBiggerThan1 + "_" + this.cropWidthBiggerThan1; + } + CropAndResizeProgram.prototype.getUserCode = function () { + var _a = __read(["f32(uniforms.imageShape[1] - 1)", "f32(uniforms.imageShape[2] - 1)"], 2), inputHeightFloat = _a[0], inputWidthFloat = _a[1]; + var _b = __read(this.cropHeightBiggerThan1 ? + [ + "(" + inputHeightFloat + " / f32(uniforms.outShape[1] - 1))", + '(y2-y1) * height_ratio', + "y1*" + inputHeightFloat + " + f32(y)*(height_scale)", + ] : + [ + '0.0', + '0.0', + "0.5 * (y1+y2) * " + inputHeightFloat, + ], 3), heightRatio = _b[0], heightScale = _b[1], inY = _b[2]; + var _c = __read(this.cropWidthBiggerThan1 ? + [ + "(" + inputWidthFloat + " / f32(uniforms.outShape[2] - 1))", + '(x2-x1) * width_ratio', + "x1*" + inputWidthFloat + " + f32(x)*(width_scale)", + ] : + [ + '0.0', + '0.0', + "0.5 * (x1+x2) * " + inputWidthFloat, + ], 3), widthRatio = _c[0], widthScale = _c[1], inX = _c[2]; + // Reference implementation + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let height_ratio = f32(" + heightRatio + ");\n let width_ratio = f32(" + widthRatio + ");\n let b = coords[0];\n let y = coords[1];\n let x = coords[2];\n let d = coords[3];\n // get box vals\n let y1 = getBoxes(b, 0);\n let x1 = getBoxes(b, 1);\n let y2 = getBoxes(b, 2);\n let x2 = getBoxes(b, 3);\n // get image in batch index\n let bInd = i32(round(getBoxInd(b)));\n if(bInd < 0 || bInd >= uniforms.outShape[0]) {\n return;\n }\n let height_scale = " + heightScale + ";\n let width_scale = " + widthScale + ";\n let in_y = " + inY + ";\n if( in_y < 0.0 || in_y > " + inputHeightFloat + " ) {\n setOutputAtIndex(index, uniforms.extrapolationValue);\n return;\n }\n let in_x = " + inX + ";\n if( in_x < 0.0 || in_x > " + inputWidthFloat + " ) {\n setOutputAtIndex(index, uniforms.extrapolationValue);\n return;\n }\n let sourceFracIndexCR = vec2(in_x,in_y);\n if(" + this.methodId + " == 1) {\n // Compute the four integer indices.\n let sourceFloorCR = vec2(sourceFracIndexCR);\n let sourceCeilCR = vec2(ceil(sourceFracIndexCR));\n let topLeft = getImage(bInd, sourceFloorCR.y, sourceFloorCR.x, d);\n let bottomLeft = getImage(bInd, sourceCeilCR.y, sourceFloorCR.x, d);\n let topRight = getImage(bInd, sourceFloorCR.y, sourceCeilCR.x, d);\n let bottomRight = 
getImage(bInd, sourceCeilCR.y, sourceCeilCR.x, d);\n let fracCR = sourceFracIndexCR - vec2(sourceFloorCR);\n let top = topLeft + (topRight - topLeft) * fracCR.x;\n let bottom = bottomLeft + (bottomRight - bottomLeft) * fracCR.x;\n let newValue = top + (bottom - top) * fracCR.y;\n setOutputAtIndex(index, newValue);\n } else {\n // Compute the coordinators of nearest neighbor point.\n let sourceNearestCR = vec2(floor(\n sourceFracIndexCR + vec2(0.5,0.5)));\n let newValue = getImage(\n bInd, sourceNearestCR.y, sourceNearestCR.x, d);\n setOutputAtIndex(index, newValue);\n }\n }\n }\n "; + return userCode; + }; + return CropAndResizeProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var cropAndResize = function (args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var image = inputs.image, boxes = inputs.boxes, boxInd = inputs.boxInd; + var cropSize = attrs.cropSize, method = attrs.method, extrapolationValue = attrs.extrapolationValue; + var program = new CropAndResizeProgram(image.shape[3], boxes.shape, cropSize, method); + var uniformData = [{ type: 'float32', data: [extrapolationValue] }]; + return backend.runWebGPUProgram(program, [image, boxes, boxInd], 'float32', uniformData); + }; + var cropAndResizeConfig = { + kernelName: tf.CropAndResize, + backendName: 'webgpu', + kernelFunc: cropAndResize + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var CumOpType; + (function (CumOpType) { + CumOpType["Prod"] = "*"; + CumOpType["Sum"] = "+"; + })(CumOpType || (CumOpType = {})); + var CumProgram = /** @class */ (function () { + function CumProgram(op, shape, exclusive, reverse) { + this.variableNames = ['x']; + // pow(i32, i32) is not supported, use pow(f32, f32) instead. 
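+ // The 'index' uniform holds the current scan pass; the shader derives the
+ // stride for that pass as pow2 = i32(pow(2.0, uniforms.index)) (see cumImpl below).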
+ this.uniforms = 'index : f32,'; + this.size = true; + var workGroupSizeX = 128; + this.workGroupSize = [workGroupSizeX, 1, 1]; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.exclusive = exclusive; + this.reverse = reverse; + this.op = op; + this.shaderKey = "cum_" + this.op + "_" + this.exclusive + "_" + this.reverse; + } + CumProgram.prototype.getUserCode = function () { + var rank = this.outputShape.length; + var initVal = this.op === CumOpType.Prod ? '1.0' : '0.0'; + var val = this.exclusive ? initVal : + "getX(" + getCoords(rank, 'coords', this.op) + ")"; + var length = this.outputShape[this.outputShape.length - 1]; + var condition = ''; + var idxString = ''; + // When exclusive is set, the cum op becomes roll op that copies the + // value from the previous index based on the direction specified by the + // reverse flag. + if (this.exclusive) { + condition = this.reverse ? "end != " + (length - 1) : 'end != 0'; + idxString = this.reverse ? 'end + 1' : 'end - 1'; + } + else { + condition = this.reverse ? "end + pow2 < " + length : 'end >= pow2'; + idxString = (this.reverse ? 'end + pow2' : 'end - pow2'); + } + return "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n var coords = getCoordsFromIndex(index);\n\n let end = " + getFinalCoord(rank, 'coords', this.op) + ";\n var val = " + val + ";\n let pow2 = i32(pow(2.0, uniforms.index));\n if (" + condition + ") {\n let idx = " + idxString + ";\n " + getFinalCoord(rank, 'coords', this.op) + " = idx;\n val " + this.op + "= getX(" + getCoords(rank, 'coords', this.op) + ");\n }\n setOutputAtIndex(index, val);\n }\n }\n "; + }; + return CumProgram; + }()); + function getCoords(rank, name, op) { + if (rank === 1) { + return "" + name; + } + else if (rank === 2) { + return name + ".x, " + name + ".y"; + } + else if (rank === 3) { + return name + ".x, " + name + ".y, " + name + ".z"; + } + else if (rank === 4) { + return name + ".x, " + name + ".y, " + name + ".z, " + name + ".w"; + } + else { + throw Error("Cumulative " + op + " for rank " + rank + " is not yet supported"); + } + } + function getFinalCoord(rank, name, op) { + if (rank === 1) { + return "" + name; + } + else if (rank === 2) { + return name + ".y"; + } + else if (rank === 3) { + return name + ".z"; + } + else if (rank === 4) { + return name + ".w"; + } + else { + throw Error("Cumulative " + op + " for rank " + rank + " is not yet supported"); + } + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function cumImpl(op, x, backend, axis, exclusive, reverse) { + var xRank = x.shape.length; + var permutation = tf.backend_util.getAxesPermutation([axis], xRank); + var permutedX = x; + if (permutation != null) { + permutedX = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutation } }); + } + var permutedAxis = tf.backend_util.getInnerMostAxes(1, xRank)[0]; + if (permutedAxis !== xRank - 1) { + throw new Error("WebGPU cumprod shader expects an inner-most axis=" + (x.shape.length - 1) + " " + + ("but got axis=" + axis)); + } + var size = permutedX.shape[permutedAxis]; + var result = identity({ inputs: { x: permutedX }, backend: backend }); + // Use cum parallel algorithm, inspired by: + // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda + // Note: although the algorithm is called sum, it works for any associtative + // operator with an identity. + for (var i = 0; i <= Math.ceil(Math.log2(size)) - 1; i++) { + var program = new CumProgram(op, permutedX.shape, false, reverse); + var prevResult = result; + var uniformData = [{ type: 'float32', data: [i] }]; + result = + backend.runWebGPUProgram(program, [result], result.dtype, uniformData); + backend.disposeData(prevResult.dataId); + } + // For exclusive cum, shift the end result in the direction of product or sum + // and add 1 for product or 0 for sum to the front index. + if (exclusive) { + var program = new CumProgram(op, permutedX.shape, exclusive, reverse); + var prevResult = result; + var uniformData = [{ type: 'float32', data: [0] }]; + result = + backend.runWebGPUProgram(program, [result], result.dtype, uniformData); + backend.disposeData(prevResult.dataId); + } + if (permutation != null) { + var reversePermutation = tf.backend_util.getUndoAxesPermutation(permutation); + var reverseTransposedResult = transpose({ inputs: { x: result }, backend: backend, attrs: { perm: reversePermutation } }); + backend.disposeData(result.dataId); + backend.disposeData(permutedX.dataId); + return reverseTransposedResult; + } + return result; + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cumprod(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis, exclusive = attrs.exclusive, reverse = attrs.reverse; + return cumImpl(CumOpType.Prod, x, backend, axis, exclusive, reverse); + } + var cumprodConfig = { + kernelName: tf.Cumprod, + backendName: 'webgpu', + kernelFunc: cumprod + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cumsum(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis, exclusive = attrs.exclusive, reverse = attrs.reverse; + return cumImpl(CumOpType.Sum, x, backend, axis, exclusive, reverse); + } + var cumsumConfig = { + kernelName: tf.Cumsum, + backendName: 'webgpu', + kernelFunc: cumsum + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var DepthToSpaceProgram = /** @class */ (function () { + function DepthToSpaceProgram(outputShape, dataFormat) { + this.variableNames = ['x']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.uniforms = 'blockSize : i32,'; + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "depthToSpace_" + dataFormat; + this.dataFormat = dataFormat; + } + DepthToSpaceProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let b = coords[0];\n let h = " + this.getHeightCoordString() + ";\n let w = " + this.getWidthCoordString() + ";\n let d = " + this.getDepthCoordString() + ";\n\n let in_h = h / uniforms.blockSize;\n let offset_h = h % uniforms.blockSize;\n let in_w = w / uniforms.blockSize;\n let offset_w = w % uniforms.blockSize;\n let offset_d = (offset_h * uniforms.blockSize + offset_w) *\n " + this.getOutputDepthSize() + ";\n let in_d = d + offset_d;\n\n let rlt = " + this.getInputSamplingString() + ";\n setOutputAtIndex(index, rlt);\n }\n }"; + return userCode; + }; + DepthToSpaceProgram.prototype.getHeightCoordString = function () { + if (this.dataFormat === 'NHWC') { + return "coords[1]"; + } + else { + return "coords[2]"; + } + }; + DepthToSpaceProgram.prototype.getWidthCoordString = function () { + if (this.dataFormat === 'NHWC') { + return "coords[2]"; + } + else { + return "coords[3]"; + } + }; + DepthToSpaceProgram.prototype.getDepthCoordString = function () { + if (this.dataFormat === 'NHWC') { + return "coords[3]"; + } + else { + return "coords[1]"; + } + }; + DepthToSpaceProgram.prototype.getOutputDepthSize = function () { + if (this.dataFormat === 'NHWC') { + return "uniforms.outShape[3]"; + } + else { + return 
"uniforms.outShape[1]"; + } + }; + DepthToSpaceProgram.prototype.getInputSamplingString = function () { + if (this.dataFormat === 'NHWC') { + return "getX(b, in_h, in_w, in_d)"; + } + else { + return "getX(b, in_d, in_h, in_w)"; + } + }; + return DepthToSpaceProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function depthToSpace(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var blockSize = attrs.blockSize, dataFormat = attrs.dataFormat; + var batchSize = x.shape[0]; + var inputHeight = (dataFormat === 'NHWC') ? x.shape[1] : x.shape[2]; + var inputWidth = (dataFormat === 'NHWC') ? x.shape[2] : x.shape[3]; + var inputDepth = (dataFormat === 'NHWC') ? x.shape[3] : x.shape[1]; + var outputHeight = inputHeight * blockSize; + var outputWidth = inputWidth * blockSize; + var outputDepth = inputDepth / (blockSize * blockSize); + var outputShape = (dataFormat === 'NHWC') ? + [batchSize, outputHeight, outputWidth, outputDepth] : + [batchSize, outputDepth, outputHeight, outputWidth]; + var uniformData = [ + { type: 'int32', data: [blockSize] }, + ]; + var program = new DepthToSpaceProgram(outputShape, dataFormat); + return backend.runWebGPUProgram(program, [x], x.dtype, uniformData); + } + var depthToSpaceConfig = { + kernelName: tf.DepthToSpace, + backendName: 'webgpu', + kernelFunc: depthToSpace + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var DepthwiseConv2D3x3Program = /** @class */ (function () { + function DepthwiseConv2D3x3Program(convInfo, addBias, activation, hasPreluActivation) { + if (addBias === void 0) { addBias = false; } + if (activation === void 0) { activation = null; } + if (hasPreluActivation === void 0) { hasPreluActivation = false; } + this.variableNames = ['x', 'W']; + this.uniforms = 'pad : vec2, stride : vec2, dilation : vec2, inDims : vec2,'; + this.workGroupSize = [4, 4, 4]; + this.isVec4 = true; + this.outputShape = convInfo.outShape; + this.dispatchLayout = { x: [0, 1], y: [2], z: [3] }; + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [1, 4, 4]); + tf.util.assert(convInfo.dataFormat === 'channelsLast', function () { return 'TODO: NCHW is unimplemented'; }); + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivation) { + this.variableNames.push('preluActivationWeights'); + } + this.convInfo = convInfo; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivation = hasPreluActivation; + this.shaderKey = "depthwise3x3_" + activation; + } + DepthwiseConv2D3x3Program.prototype.getUserCode = function () { + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, this.isVec4); + if (this.hasPreluActivation) { + activationSnippet = + "fn activation(a : vec4, outCoord : vec4) -> vec4 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : vec4, outCoord : vec4) -> vec4 {\n " + activationOp + "\n }\n "; + } + applyActivationSnippet = "dotProd[i] = activation(dotProd[i], coords);"; + } + var addBiasSnippet = this.addBias ? 
+ 'dotProd[i] = dotProd[i] + getBiasByOutputCoords(coords);' : + ''; + var userCode = "\n " + activationSnippet + "\n\n " + getWorkGroupSizeString() + "\n fn main(@builtin(global_invocation_id) globalId: vec3) {\n let batch = 0;\n let r = i32(globalId.x);\n let c = i32(globalId.y) * 4;\n let d2 = i32(globalId.z) * 4;\n let xRCCorner = vec2(r, c) * uniforms.stride - uniforms.pad;\n let d1 = d2;\n let q = 0;\n\n let xRCorner = xRCCorner.x;\n let xCCorner = xRCCorner.y;\n\n var wVals : array, 9>;\n wVals[0] = getW(0, 0, d1, q);\n wVals[1] = getW(0, 1, d1, q);\n wVals[2] = getW(0, 2, d1, q);\n wVals[3] = getW(1, 0, d1, q);\n wVals[4] = getW(1, 1, d1, q);\n wVals[5] = getW(1, 2, d1, q);\n wVals[6] = getW(2, 0, d1, q);\n wVals[7] = getW(2, 1, d1, q);\n wVals[8] = getW(2, 2, d1, q);\n\n var xVals : array, 6>, 3>;\n for (var wR = 0; wR < 3; wR = wR + 1) {\n let xR = xRCorner + wR * uniforms.dilation[0];\n for (var wC = 0; wC < 6; wC = wC + 1) {\n let xC = xCCorner + wC * uniforms.dilation[1];\n if (xR < 0 || xR >= uniforms.inDims[0] || xC < 0 || xC >= uniforms.inDims[1]) {\n xVals[wR][wC] = vec4(0.0);\n } else {\n xVals[wR][wC] = getX(batch, xR, xC, d1);\n }\n }\n }\n\n var dotProd : array, 4>;\n dotProd[0] = vec4(0.0);\n dotProd[1] = vec4(0.0);\n dotProd[2] = vec4(0.0);\n dotProd[3] = vec4(0.0);\n\n for (var wR = 0; wR < 3; wR = wR + 1) {\n for (var wC = 0; wC < 3; wC = wC + 1) {\n let indexW = wR * 3 + wC;\n dotProd[0] = dotProd[0] + xVals[wR][0 + wC] * wVals[indexW];\n dotProd[1] = dotProd[1] + xVals[wR][1 + wC] * wVals[indexW];\n dotProd[2] = dotProd[2] + xVals[wR][2 + wC] * wVals[indexW];\n dotProd[3] = dotProd[3] + xVals[wR][3 + wC] * wVals[indexW];\n }\n }\n\n for (var i = 0; i < 4; i = i + 1) {\n let coords = vec4(batch, r, c + i, d2);\n if (coordsInBounds4D(coords, uniforms.outShape)) {\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n setOutputAtCoords(coords[0], coords[1], coords[2], coords[3], dotProd[i]);\n }\n }\n }\n "; + return userCode; + }; + return DepthwiseConv2D3x3Program; + }()); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var DepthwiseConv2DProgram = /** @class */ (function () { + function DepthwiseConv2DProgram(convInfo, addBias, activation, hasPreluActivation) { + if (addBias === void 0) { addBias = false; } + if (activation === void 0) { activation = null; } + if (hasPreluActivation === void 0) { hasPreluActivation = false; } + this.variableNames = ['x', 'W']; + this.uniforms = "pad : vec2, stride : vec2, dilation : vec2,\n inDims : vec2, filterHeight : i32, filterWidth : i32,\n channelMul : i32,"; + // This is an experimental value. 
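+ // With flatDispatchLayout and a 256-wide workgroup, each invocation computes a
+ // single output element; writeResult guards against out-of-bounds coordinates.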
+ this.workGroupSize = [256, 1, 1]; + this.outputShape = convInfo.outShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + tf.util.assert(convInfo.dataFormat === 'channelsLast', function () { return 'TODO: NCHW is unimplemented'; }); + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivation) { + this.variableNames.push('preluActivationWeights'); + } + this.convInfo = convInfo; + this.addBias = addBias; + this.activation = activation; + this.hasPreluActivation = hasPreluActivation; + this.shaderKey = "depthwise_" + this.activation; + } + DepthwiseConv2DProgram.prototype.getUserCode = function () { + var activationSnippet = '', applyActivationSnippet = ''; + if (this.activation) { + var activationOp = mapActivationToShaderProgram(this.activation, false); + if (this.hasPreluActivation) { + activationSnippet = + "fn activation(a : f32, outCoord : vec4) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n " + activationOp + "\n }"; + } + else { + activationSnippet = "\n fn activation(a : f32, outCoord : vec4) -> f32 {\n " + activationOp + "\n }\n "; + } + applyActivationSnippet = "dotProd = activation(dotProd, coords);"; + } + var addBiasSnippet = this.addBias ? + 'dotProd = dotProd + getBiasByOutputCoords(coords);' : + ''; + var userCode = "\n " + activationSnippet + "\n\n fn writeResult(batch : i32, row : i32, col : i32, chan : i32,\n value : f32) {\n let coord = vec4(batch, row, col, chan);\n if (coordsInBounds4D(coord, uniforms.outShape)) {\n setOutputAtCoords(batch, row, col, chan, value);\n }\n }\n\n " + getMainHeaderString() + "\n let coords = getOutputCoords();\n let batch = coords[0];\n let xRCCorner = vec2(coords.yz) * uniforms.stride - uniforms.pad;\n let d2 = coords[3];\n let d1 = d2 / uniforms.channelMul;\n let q = d2 - d1 * uniforms.channelMul;\n\n let inputRowStart = xRCCorner.x;\n let inputColStart = xRCCorner.y;\n let inputRowEnd = inputRowStart + uniforms.filterHeight *\n uniforms.dilation[0];\n let inputColEnd = inputColStart + uniforms.filterWidth *\n uniforms.dilation[1];\n\n // Convolve x(?, ?, d1) with w(:, :, d1, q) to get y(yR, yC, d2).\n // ? = to be determined. 
: = across all values in that axis.\n var dotProd = 0.0;\n\n // Extract if checking out of for loop for performance.\n if (inputRowStart >= 0 && inputColStart >= 0 &&\n inputRowEnd < uniforms.inDims[0] &&\n inputColEnd < uniforms.inDims[1]) {\n // Here using a constant value |this.convInfo.filterHeight| instead\n // of uniform value is in order to loop unrolling.\n for (var wR = 0; wR < uniforms.filterHeight; wR = wR + 1) {\n let xR = inputRowStart + wR * uniforms.dilation[0];\n\n for (var wC = 0; wC < uniforms.filterWidth; wC = wC + 1) {\n let xC = inputColStart + wC * uniforms.dilation[1];\n\n let xVal = getX(batch, xR, xC, d1);\n let wVal = getW(wR, wC, d1, q);\n dotProd = dotProd + xVal * wVal;\n }\n }\n } else {\n for (var wR = 0; wR < uniforms.filterHeight; wR = wR + 1) {\n let xR = inputRowStart + wR * uniforms.dilation[0];\n\n if (xR < 0 || xR >= uniforms.inDims[0]) {\n continue;\n }\n\n for (var wC = 0; wC < uniforms.filterWidth; wC = wC + 1) {\n let xC = inputColStart + wC * uniforms.dilation[1];\n\n if (xC < 0 || xC >= uniforms.inDims[1]) {\n continue;\n }\n\n let xVal = getX(batch, xR, xC, d1);\n let wVal = getW(wR, wC, d1, q);\n dotProd = dotProd + xVal * wVal;\n }\n }\n }\n\n " + addBiasSnippet + "\n " + applyActivationSnippet + "\n writeResult(batch, coords[1], coords[2], d2, dotProd);\n }\n "; + return userCode; + }; + return DepthwiseConv2DProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function depthwiseConv2dNative(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x, filter = inputs.filter; + var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode; + var $dilations = dilations; + if ($dilations == null) { + $dilations = [1, 1]; + } + var convInfo = tf.backend_util.computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad, dimRoundingMode, true /* depthwise */); + var dimensions = [ + { type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left] }, + { type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }, + { type: 'int32', data: [convInfo.dilationHeight, convInfo.dilationWidth] }, + { type: 'int32', data: [convInfo.inHeight, convInfo.inWidth] } + ]; + var program; + // TODO: To see if we need to relax the limitation. Currently, it's only for + // filter size 3x3. 
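+ // Fast path: batch-1, same-size (inHeight == outHeight, inWidth == outWidth),
+ // 3x3, stride-1, dilation-1, depth-multiplier-1 convolutions whose channel
+ // count is a multiple of 4 use the vectorized DepthwiseConv2D3x3Program; all
+ // other cases fall back to DepthwiseConv2DProgram, with filterHeight,
+ // filterWidth and channelMul appended as uniforms.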
+ if (convInfo.batchSize === 1 && convInfo.inHeight === convInfo.outHeight && + convInfo.inWidth === convInfo.outWidth && convInfo.strideHeight === 1 && + convInfo.strideWidth === 1 && + convInfo.filterHeight === convInfo.filterWidth && + convInfo.inChannels === convInfo.outChannels && + convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 && + convInfo.filterHeight === 3 && convInfo.inChannels % 4 === 0) { + program = new DepthwiseConv2D3x3Program(convInfo); + } + else { + program = new DepthwiseConv2DProgram(convInfo); + dimensions.push({ type: 'int32', data: [convInfo.filterHeight] }, { type: 'int32', data: [convInfo.filterWidth] }, { type: 'int32', data: [convInfo.outChannels / convInfo.inChannels] }); + } + return backend.runWebGPUProgram(program, [x, filter], x.dtype, dimensions); + } + var depthwiseConv2dNativeConfig = { + kernelName: tf.DepthwiseConv2dNative, + backendName: 'webgpu', + kernelFunc: depthwiseConv2dNative, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var multiplyKernelFunc = binaryKernelFunc({ + opSnippet: BinaryOpType.MUL, + cpuKernelImpl: multiplyImplCPU, + supportsComplex: true + }); + var multiplyConfig = { + kernelName: tf.Multiply, + backendName: 'webgpu', + kernelFunc: multiplyKernelFunc + }; + + var ReduceProgram = /** @class */ (function () { + function ReduceProgram(reduceInfo, reduceType) { + this.workGroupSize = [64, 1, 1]; + this.variableNames = ['x']; + this.uniforms = 'reduceSize : i32,'; + this.size = true; + this.inputShape = [reduceInfo.batchSize, reduceInfo.inSize]; + var _a = __read(tf.backend_util.computeOutAndReduceShapes(this.inputShape, [1]), 1), outputShape = _a[0]; + this.outputShape = outputShape.length === 0 ? [1] : outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + // A work group only outputs a data, so we transfer [1, 1, 1] to compute + // dispatch size. + this.dispatch = + computeDispatch(this.dispatchLayout, this.outputShape, [1, 1, 1]); + this.reduceType = reduceType; + this.shaderKey = "reduce_" + reduceType; + } + ReduceProgram.prototype.getUserCode = function () { + var reduceOp = ""; + var initValue = '0.0'; + if (this.reduceType === 'min' || this.reduceType === 'max') { + reduceOp = "\n if (isnan(candidate)) {\n bestValue = uniforms.NAN;\n } else if (!isnan(bestValue) && candidate " + (this.reduceType === 'min' ? '<' : '>') + " bestValue)\n { bestValue = candidate; }"; + initValue = 'f32(x[offset])'; + } + else if (this.reduceType === 'sum' || this.reduceType === 'mean') { + reduceOp = ' bestValue = bestValue + candidate; '; + } + else if (this.reduceType === 'prod') { + reduceOp = ' bestValue = bestValue * candidate; '; + initValue = '1.0'; + } + var outputSnippet = this.reduceType === 'mean' ? 
+ // tslint:disable-next-line:max-line-length + "setOutputAtIndex(outputIndex, bestValue / f32(uniforms.reduceSize));" : + "setOutputAtIndex(outputIndex, bestValue);"; + var sharedMemorySnippet = "\n var xBestValues : array;\n "; + var userCode = "\n fn DIV_CEIL(a : u32, b : u32) -> u32 {\n return ((a - 1u) / b + 1u);\n }\n\n " + sharedMemorySnippet + "\n fn getOffset(outputIndex : i32) -> i32 {\n let outputCoords = getCoordsFromIndex(outputIndex);\n let offset = " + (this.outputShape.length === 1 ? + 'outputCoords' : + 'outputCoords[0]') + " * uniforms.reduceSize;\n return offset;\n }\n " + getMainHeaderAndGlobalIndexString() + "\n let outputIndex = index / i32(workGroupSizeX);\n let offset = getOffset(outputIndex);\n var bestValue = " + initValue + ";\n let Length = uniforms.reduceSize;\n let WorkPerThread = DIV_CEIL(u32(Length), workGroupSizeX);\n for (var k = i32(localId.x); k < Length && outputIndex < uniforms.size;\n k = k + i32(workGroupSizeX)) {\n let candidate = f32(x[offset + k]);\n " + reduceOp + "\n }\n xBestValues[localId.x] = bestValue;\n workgroupBarrier();\n\n var reduceSize = min(u32(Length), workGroupSizeX);\n for (var currentSize = reduceSize / 2u; reduceSize > 1u;\n currentSize = reduceSize / 2u) {\n let interval = DIV_CEIL(reduceSize, 2u);\n if (localId.x < currentSize) {\n let candidate = xBestValues[localId.x + interval];\n " + reduceOp + "\n xBestValues[localId.x] = bestValue;\n }\n reduceSize = interval;\n workgroupBarrier();\n }\n\n if (localId.x == 0u && outputIndex < uniforms.size) {\n " + outputSnippet + "\n }\n }\n "; + return userCode; + }; + return ReduceProgram; + }()); + + function reduce(x, axis, keepDims, reduceType, backend) { + var xRank = x.shape.length; + var toDispose = []; + var origAxes = tf.util.parseAxisParam(axis, x.shape); + var axes = origAxes; + var permutedAxes = tf.backend_util.getAxesPermutation(axes, xRank); + var input = x; + if (permutedAxes != null) { + input = transpose({ inputs: { x: x }, attrs: { perm: permutedAxes }, backend: backend }); + axes = tf.backend_util.getInnerMostAxes(axes.length, xRank); + toDispose.push(input); + } + tf.backend_util.assertAxesAreInnerMostDims(reduceType, axes, xRank); + var _a = __read(tf.backend_util.computeOutAndReduceShapes(input.shape, axes), 2), reduceOutShape = _a[0], reduceShape = _a[1]; + var resOutShape = reduceOutShape; + if (keepDims) { + // rather than reshape at the end, set the target shape here. + resOutShape = tf.backend_util.expandShapeToKeepDim(reduceOutShape, origAxes); + } + var res; + if ((reduceType === 'max' || reduceType === 'prod') && + backend.shouldExecuteOnCPU([input])) { + var xVals = backend.tensorMap.get(input.dataId).values; + switch (reduceType) { + case 'max': + var outValues = maxImplCPU(xVals, tf.util.sizeFromShape(reduceShape), resOutShape, x.dtype); + res = backend.makeTensorInfo(resOutShape, x.dtype, outValues); + break; + case 'prod': + var _b = prodImplCPU(input.shape, input.dtype, xVals, axes), outVals = _b.outVals, outShape = _b.outShape, outDtype = _b.outDtype; + res = backend.makeTensorInfo(outShape, outDtype, outVals); + break; + default: + throw new Error(reduceType + " CPU implementation is not yet supported."); + } + } + else { + var inSize = tf.util.sizeFromShape(reduceShape); + var xSize = tf.util.sizeFromShape(input.shape); + var batchSize = xSize / inSize; + var reduceInfo = { windowSize: inSize, inSize: inSize, batchSize: batchSize, outSize: 1 }; + var dtype = reduceType === 'mean' ? 
'float32' : tf.sumOutType(x.dtype); + var uniformData = [ + { type: 'int32', data: [inSize] }, + ]; + var program = new ReduceProgram(reduceInfo, reduceType); + var reduced = backend.runWebGPUProgram(program, [input], dtype, uniformData); + toDispose.push(reduced); + res = reshape({ inputs: { x: reduced }, attrs: { shape: resOutShape }, backend: backend }); + } + toDispose.forEach(function (t) { return backend.disposeData(t.dataId); }); + return res; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sum(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis, keepDims = attrs.keepDims; + return reduce(x, axis, keepDims, 'sum', backend); + } + var sumConfig = { + kernelName: tf.Sum, + backendName: 'webgpu', + kernelFunc: sum + }; + + function einsum(args) { + var e_1, _a, e_2, _b; + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var equation = attrs.equation; + var tensors = inputs; + var _c = tf.backend_util.decodeEinsumEquation(equation, tensors.length), allDims = _c.allDims, summedDims = _c.summedDims, idDims = _c.idDims; + tf.backend_util.checkEinsumDimSizes(allDims.length, idDims, tensors); + var _d = tf.backend_util.getEinsumComputePath(summedDims, idDims), path = _d.path, steps = _d.steps; + var nSteps = steps.length; + var out = null; + var numDimsRemaining = allDims.length; + var tensorsToDispose = []; + for (var i = 0; i < nSteps; ++i) { + try { + for (var _e = (e_1 = void 0, __values(steps[i])), _f = _e.next(); !_f.done; _f = _e.next()) { + var idTerm = _f.value; + var _g = tf.backend_util.getEinsumPermutation(numDimsRemaining, idDims[idTerm]), perm = _g.permutationIndices, dimsToExpand = _g.expandDims; + var x = void 0; + if (tf.backend_util.isIdentityPermutation(perm)) { + x = tensors[idTerm]; + } + else { + x = transpose({ inputs: { x: tensors[idTerm] }, backend: backend, attrs: { perm: perm } }); + tensorsToDispose.push(x); + } + var targetShape = x.shape.slice(); + for (var k = 0; k < dimsToExpand.length; ++k) { + targetShape.splice(dimsToExpand[k], 0, 1); + } + if (!tf.util.arraysEqual(x.shape, targetShape)) { + x = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: targetShape } }); + tensorsToDispose.push(x); + } + if (out === null) { + out = x; + } + else { + // tslint:disable-next-line: no-unnecessary-type-assertion + out = + multiplyKernelFunc({ inputs: { a: x, b: out }, backend: backend }); + tensorsToDispose.push(out); + } + } + } + catch (e_1_1) { e_1 = { error: e_1_1 }; } + finally { + try { + if (_f && !_f.done && (_a = _e.return)) _a.call(_e); + } + finally { if (e_1) throw e_1.error; } + } + if (i < nSteps - 1) { + if (path[i] >= 0) { + out = sum({ + inputs: { x: out }, + backend: backend, + attrs: { + axis: path[i] - (allDims.length - numDimsRemaining), + keepDims: false + } + }); + 
tensorsToDispose.push(out); + } + numDimsRemaining--; + } + } + try { + // Clean up intermediate tensors. + for (var tensorsToDispose_1 = __values(tensorsToDispose), tensorsToDispose_1_1 = tensorsToDispose_1.next(); !tensorsToDispose_1_1.done; tensorsToDispose_1_1 = tensorsToDispose_1.next()) { + var tensorInfo = tensorsToDispose_1_1.value; + if (tensorInfo === out) { + continue; + } + backend.disposeData(tensorInfo.dataId); + } + } + catch (e_2_1) { e_2 = { error: e_2_1 }; } + finally { + try { + if (tensorsToDispose_1_1 && !tensorsToDispose_1_1.done && (_b = tensorsToDispose_1.return)) _b.call(tensorsToDispose_1); + } + finally { if (e_2) throw e_2.error; } + } + return out; + } + var einsumConfig = { + kernelName: tf.Einsum, + backendName: 'webgpu', + kernelFunc: einsum + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var elu = unaryKernelFunc({ opType: UnaryOpType.ELU }); + var eluConfig = { + kernelName: tf.Elu, + backendName: 'webgpu', + kernelFunc: elu + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var equal = binaryKernelFunc({ opSnippet: BinaryOpType.EQUAL, dtype: 'bool', cpuKernelImpl: equalImplCPU }); + var equalConfig = { + kernelName: tf.Equal, + backendName: 'webgpu', + kernelFunc: equal + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var exp = unaryKernelFunc({ + opType: UnaryOpType.EXP, + cpuKernelImpl: expImplCPU, + dtype: 'float32', + }); + var expConfig = { + kernelName: tf.Exp, + backendName: 'webgpu', + kernelFunc: exp + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function expandDims(args) { + var inputs = args.inputs, attrs = args.attrs, backend = args.backend; + var dim = attrs.dim; + var input = inputs.input; + var inputRank = input.shape.length; + var newShape = input.shape.slice(); + var $dim = dim; + if (dim < 0) { + // Negative value is counted from the tail of rank. + tf.util.assert(-(inputRank + 1) <= dim, function () { return "Axis must be in the interval [" + -(inputRank + 1) + ", " + inputRank + "]"; }); + $dim = inputRank + dim + 1; + } + newShape.splice($dim, 0, 1); + return reshape({ inputs: { x: input }, backend: backend, attrs: { shape: newShape } }); + } + var expandDimsConfig = { + kernelName: tf.ExpandDims, + backendName: 'webgpu', + kernelFunc: expandDims, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var expm1 = unaryKernelFunc({ opType: UnaryOpType.EXPM1, cpuKernelImpl: expm1ImplCPU }); + var expm1Config = { + kernelName: tf.Expm1, + backendName: 'webgpu', + kernelFunc: expm1 + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var FillProgram = /** @class */ (function () { + function FillProgram(shape) { + this.variableNames = []; + this.outputShape = []; + this.uniforms = 'value : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = 'fill'; + } + FillProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n setOutputAtIndex(index, uniforms.value);\n }\n }\n "; + return userCode; + }; + return FillProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fill(args) { + var backend = args.backend, attrs = args.attrs; + var shape = attrs.shape, value = attrs.value; + var dtype = attrs.dtype; + dtype = dtype || tf.util.inferDtype(value); + if (dtype === 'string') { + // String type should be handled in CPU memory. + var values = tf.util.getArrayFromDType(dtype, tf.util.sizeFromShape(shape)); + values.fill(value); + return backend.makeTensorInfo(shape, dtype, values); + } + else { + var program = new FillProgram(shape); + var uniformData = [{ type: 'float32', data: [value] }]; + return backend.runWebGPUProgram(program, [], dtype, uniformData); + } + } + var fillConfig = { + kernelName: tf.Fill, + backendName: 'webgpu', + kernelFunc: fill + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var FlipLeftRightProgram = /** @class */ (function () { + function FlipLeftRightProgram(imageShape) { + this.outputShape = []; + this.variableNames = ['x']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = imageShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = 'flipLeftRight'; + } + FlipLeftRightProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let coordX = uniforms.xShape[2] - coords[2] - 1;\n let outputValue = getX(coords[0], coords[1], coordX, coords[3]);\n setOutputAtIndex(index, outputValue);\n }\n }\n "; + return userCode; + }; + return FlipLeftRightProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var flipLeftRightConfig = { + kernelName: tf.FlipLeftRight, + backendName: 'webgpu', + kernelFunc: function (_a) { + var inputs = _a.inputs, backend = _a.backend; + var image = inputs.image; + var webgpuBackend = backend; + var program = new FlipLeftRightProgram(image.shape); + var output = webgpuBackend.runWebGPUProgram(program, [image], image.dtype); + return output; + } + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var floor = unaryKernelFunc({ opType: UnaryOpType.FLOOR, cpuKernelImpl: floorImplCPU }); + var floorConfig = { + kernelName: tf.Floor, + backendName: 'webgpu', + kernelFunc: floor + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var floorDiv = binaryKernelFunc({ opSnippet: BinaryOpType.INT_DIV, dtype: 'int32' });
+ var floorDivConfig = {
+ kernelName: tf.FloorDiv,
+ backendName: 'webgpu',
+ kernelFunc: floorDiv
+ };
+
+ /**
+ * @license
+ * Copyright 2022 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var FromPixelsProgram = /** @class */ (function () {
+ function FromPixelsProgram(outputShape, useImport) {
+ if (useImport === void 0) { useImport = false; }
+ this.outputShape = [0];
+ this.variableNames = [];
+ this.workGroupSize = [256, 1, 1]; // The empirical value.
+ this.outputShape = outputShape;
+ this.dispatchLayout = flatDispatchLayout(this.outputShape);
+ this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize);
+ this.useImport = useImport;
+ this.shaderKey = "fromPixels_" + this.useImport;
+ }
+ FromPixelsProgram.prototype.getUserCode = function () {
+ var textureLoad = this.useImport ?
+ 'textureLoad(src, vec2<i32>(coords.yx));' :
+ 'textureLoad(src, vec2<i32>(coords.yx), 0)';
+ var textureType = this.useImport ? 'texture_external' : 'texture_2d<f32>';
+ return "\n @binding(1) @group(0) var src: " + textureType + ";\n\n " + getMainHeaderAndGlobalIndexString() + "\n let flatIndexBase = index * uniforms.numChannels;\n for (var i = 0; i < uniforms.numChannels; i = i + 1) {\n let flatIndex = flatIndexBase + i;\n if (flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndexBase);\n let values = " + textureLoad + ";\n result[flatIndex] = i32(floor(255.0 * values[i]));\n }\n }\n }\n ";
+ };
+ return FromPixelsProgram;
+ }());
+
+ var fromPixelsConfig = {
+ kernelName: tf.FromPixels,
+ backendName: 'webgpu',
+ kernelFunc: fromPixels,
+ };
+ var fromPixels2DContext;
+ function fromPixels(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var pixels = inputs.pixels;
+ var numChannels = attrs.numChannels;
+ if (pixels == null) {
+ throw new Error('pixels passed to tf.browser.fromPixels() can not be null');
+ }
+ var isVideo = typeof (HTMLVideoElement) !== 'undefined' &&
+ pixels instanceof HTMLVideoElement;
+ var isImage = typeof (HTMLImageElement) !== 'undefined' &&
+ pixels instanceof HTMLImageElement;
+ var isCanvas = (typeof (HTMLCanvasElement) !== 'undefined' &&
+ pixels instanceof HTMLCanvasElement) ||
+ (typeof (OffscreenCanvas) !== 'undefined' &&
+ pixels instanceof OffscreenCanvas);
+ var isImageBitmap = typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap;
+ var _a = __read(isVideo ?
+ [ + pixels.videoWidth, + pixels.videoHeight + ] : + [pixels.width, pixels.height], 2), width = _a[0], height = _a[1]; + var outShape = [height, width, numChannels]; + if (tf.env().getBool('WEBGPU_USE_IMPORT')) { + if (isVideo) { + return fromPixelsExternalImage({ + externalImage: pixels, + backend: backend, + attrs: attrs, + outShape: outShape, + useImport: true + }); + } + } + if (isVideo || isImage) { + if (fromPixels2DContext == null) { + fromPixels2DContext = document.createElement('canvas').getContext('2d'); + } + fromPixels2DContext.canvas.width = width; + fromPixels2DContext.canvas.height = height; + fromPixels2DContext.drawImage(pixels, 0, 0, width, height); + pixels = fromPixels2DContext.canvas; + } + if (isImageBitmap || isCanvas || isVideo || isImage) { + return fromPixelsExternalImage({ + externalImage: pixels, + backend: backend, + attrs: attrs, + outShape: outShape, + useImport: false + }); + } + // TODO: Encoding should happen on GPU once we no longer have to download + // image data to the CPU. + var imageData = pixels.data; + var pixelArray = imageData; + if (numChannels != null && numChannels !== 4) { + pixelArray = new Uint8Array(pixels.width * pixels.height * numChannels); + var dataLength = imageData.length; + var j = 0; + for (var i = 0; i < dataLength; i++) { + if (i % 4 < numChannels) { + pixelArray[j++] = imageData[i]; + } + } + } + var output = backend.makeTensorInfo(outShape, 'int32'); + var info = backend.tensorMap.get(output.dataId); + info.values = new Int32Array(pixelArray); + backend.maybeReleaseBuffer(output.dataId); + backend.uploadToGPU(output.dataId); + return output; + } + function fromPixelsExternalImage(args) { + var externalImage = args.externalImage, backend = args.backend, attrs = args.attrs, outShape = args.outShape, useImport = args.useImport; + var numChannels = attrs.numChannels; + var size = tf.util.sizeFromShape(outShape); + var strides = tf.util.computeStrides(outShape); + var program = new FromPixelsProgram(outShape, useImport); + var uniformData = [ + { type: 'uint32', data: [size] }, { type: 'uint32', data: [numChannels] }, + { type: 'uint32', data: __spread(strides) }, + { type: 'uint32', data: __spread(program.dispatch) } + ]; + var output = backend.runFromPixelsProgram(program, outShape, uniformData, useImport, externalImage); + return output; + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BatchNormProgram = /** @class */ (function () { + function BatchNormProgram(xShape, meanShape, varianceShape, offsetShape, scaleShape) { + this.uniforms = 'varianceEpsilon : f32,'; + // This is an experimental value. 
+ this.workGroupSize = [128, 1, 1];
+ this.size = true;
+ this.variableNames = ['x', 'mean', 'variance'];
+ tf.backend_util.assertAndGetBroadcastShape(xShape, meanShape);
+ tf.backend_util.assertAndGetBroadcastShape(xShape, varianceShape);
+ this.outputShape = xShape;
+ this.dispatchLayout = flatDispatchLayout(this.outputShape);
+ this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize);
+ if (offsetShape != null) {
+ tf.backend_util.assertAndGetBroadcastShape(xShape, offsetShape);
+ this.variableNames.push('offset');
+ }
+ if (scaleShape != null) {
+ tf.backend_util.assertAndGetBroadcastShape(xShape, scaleShape);
+ this.variableNames.push('scale');
+ }
+ this.offsetShape = offsetShape;
+ this.scaleShape = scaleShape;
+ this.shaderKey = 'batchNorm';
+ }
+ BatchNormProgram.prototype.getUserCode = function () {
+ var offsetSnippet = '0.0';
+ if (this.offsetShape != null) {
+ offsetSnippet = 'getOffsetByOutputIndex(index)';
+ }
+ var scaleSnippet = '1.0';
+ if (this.scaleShape != null) {
+ scaleSnippet = 'getScaleByOutputIndex(index)';
+ }
+ var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size)\n {\n let xValue = getXByOutputIndex(index);\n let meanValue = getMeanByOutputIndex(index);\n let varianValue = getVarianceByOutputIndex(index);\n let offsetValue = " + offsetSnippet + ";\n let scaleValue = " + scaleSnippet + ";\n let inv = scaleValue * inverseSqrt(varianValue + f32(uniforms.varianceEpsilon));\n setOutputAtIndex(index,dot(vec3<f32>(xValue, -meanValue, offsetValue), vec3<f32>(inv, inv, 1.0)));\n }\n }\n ";
+ return userCode;
+ };
+ return BatchNormProgram;
+ }());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var fusedBatchNormConfig = {
+ kernelName: tf.FusedBatchNorm,
+ backendName: 'webgpu',
+ kernelFunc: function (_a) {
+ var inputs = _a.inputs, attrs = _a.attrs, backend = _a.backend;
+ var x = inputs.x, scale = inputs.scale, offset = inputs.offset, mean = inputs.mean, variance = inputs.variance;
+ var varianceEpsilon = attrs.varianceEpsilon;
+ var webGPUBackend = backend;
+ var batchNormInputs = [x, mean, variance];
+ var offsetShape = null;
+ if (offset != null) {
+ offsetShape = offset.shape;
+ batchNormInputs.push(offset);
+ }
+ var scaleShape = null;
+ if (scale != null) {
+ scaleShape = scale.shape;
+ batchNormInputs.push(scale);
+ }
+ var program = new BatchNormProgram(x.shape, mean.shape, variance.shape, offsetShape, scaleShape);
+ var uniformData = [{ type: 'float32', data: [varianceEpsilon] }];
+ return webGPUBackend.runWebGPUProgram(program, batchNormInputs, x.dtype, uniformData);
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fusedConv2d(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x, filter = inputs.filter, bias = inputs.bias, preluActivationWeights = inputs.preluActivationWeights; + var strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode, activation = attrs.activation, leakyreluAlpha = attrs.leakyreluAlpha; + var $dataFormat = tf.backend_util.convertConv2DDataFormat(dataFormat); + var convInfo = tf.backend_util.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + return conv2DImpl({ x: x, filter: filter, convInfo: convInfo, backend: backend, bias: bias, preluActivationWeights: preluActivationWeights, + leakyreluAlpha: leakyreluAlpha, activation: activation }); + } + var fusedConv2DConfig = { + kernelName: tf.FusedConv2D, + backendName: 'webgpu', + kernelFunc: fusedConv2d, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fusedDepthwiseConv2D(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x, filter = inputs.filter, bias = inputs.bias, preluActivationWeights = inputs.preluActivationWeights; + var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode, activation = attrs.activation, leakyreluAlpha = attrs.leakyreluAlpha; + var $dilations = dilations; + if ($dilations == null) { + $dilations = [1, 1]; + } + tf.util.assert(tf.backend_util.eitherStridesOrDilationsAreOne(strides, $dilations), function () { return 'Error in depthwiseConv2d: Either strides or dilations must be ' + + ("1. 
Got strides " + strides + " and dilations '" + $dilations + "'"); }); + var convInfo = tf.backend_util.computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad, dimRoundingMode, true /* depthwise */); + var programInputs = [x, filter]; + var hasBias = bias != null; + var hasPreluActivationWeights = preluActivationWeights != null; + if (hasBias) { + programInputs.push(bias); + } + if (hasPreluActivationWeights) { + programInputs.push(preluActivationWeights); + } + var dimensions = [ + { type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left] }, + { type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }, + { type: 'int32', data: [convInfo.dilationHeight, convInfo.dilationWidth] }, + { type: 'int32', data: [convInfo.inHeight, convInfo.inWidth] } + ]; + var program; + // TODO: To see if we need to relax the limitation. Currently, it's only for + // filter size 3x3. + if (convInfo.batchSize === 1 && convInfo.inHeight === convInfo.outHeight && + convInfo.inWidth === convInfo.outWidth && convInfo.strideHeight === 1 && + convInfo.strideWidth === 1 && + convInfo.filterHeight === convInfo.filterWidth && + convInfo.inChannels === convInfo.outChannels && + convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 && + convInfo.filterHeight === 3 && convInfo.inChannels % 4 === 0) { + program = new DepthwiseConv2D3x3Program(convInfo, hasBias, activation, hasPreluActivationWeights); + } + else { + program = new DepthwiseConv2DProgram(convInfo, hasBias, activation, hasPreluActivationWeights); + dimensions.push({ type: 'int32', data: [convInfo.filterHeight] }, { type: 'int32', data: [convInfo.filterWidth] }, { type: 'int32', data: [convInfo.outChannels / convInfo.inChannels] }); + } + if (activation === 'leakyrelu') { + dimensions.push({ type: 'float32', data: [leakyreluAlpha] }); + program.uniforms += ' alpha : f32,'; + } + var result = backend.runWebGPUProgram(program, programInputs, 'float32', dimensions); + return result; + } + var fusedDepthwiseConv2DConfig = { + kernelName: tf.FusedDepthwiseConv2D, + backendName: 'webgpu', + kernelFunc: fusedDepthwiseConv2D, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var GatherNDProgram = /** @class */ (function () { + function GatherNDProgram(sliceDim, shape) { + this.variableNames = ['A', 'indices']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "gathernd_" + sliceDim; + this.sliceDim = sliceDim; + this.uniforms = "sliceDim : i32, strides : " + getCoordsDataType(sliceDim) + ","; + } + GatherNDProgram.prototype.getUserCode = function () { + var strideString; + if (this.sliceDim > 1) { + strideString = 'uniforms.strides[j]'; + } + else { + strideString = 'uniforms.strides'; + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n var flattenIndex = 0;\n for (var j = 0; j < uniforms.sliceDim; j = j + 1) {\n let indexTemp = i32(round(getIndices(coords[0], j)));\n let strideNum = " + strideString + ";\n flattenIndex = flattenIndex + indexTemp * strideNum;\n }\n\n setOutputAtIndex(index, getA(flattenIndex, coords[1]));\n }\n }\n "; + return userCode; + }; + return GatherNDProgram; + }()); + + function gatherNd(args) { + var inputs = args.inputs, backend = args.backend; + var params = inputs.params, indices = inputs.indices; + var indicesShape = indices.shape; + var sliceRank = indicesShape[indicesShape.length - 1]; + var paramsSize = tf.util.sizeFromShape(params.shape); + var _a = __read(tf.backend_util.prepareAndValidate(params, indices), 4), resultShape = _a[0], numSlices = _a[1], sliceSize = _a[2], strides = _a[3]; + var flattenIndices = reshape({ inputs: { x: indices }, backend: backend, attrs: { shape: [numSlices, sliceRank] } }); + var flattenX = reshape({ + inputs: { x: params }, + backend: backend, + attrs: { shape: [(tf.util.sizeFromShape(params.shape) / sliceSize), sliceSize] } + }); + if (backend.shouldExecuteOnCPU([params, indices]) || + params.dtype === 'string') { + var indicesData = backend.readSync(indices.dataId); + var paramsBuf = backend.bufferSync(params); + var outValue = gatherNdImplCPU(indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize, strides, params.shape, paramsSize); + return backend.makeTensorInfo(resultShape, params.dtype, outValue.values); + } + var program = new GatherNDProgram(sliceRank, [numSlices, sliceSize]); + var uniformData = [{ type: 'int32', data: [sliceRank] }, { type: 'int32', data: strides }]; + var res = backend.runWebGPUProgram(program, [flattenX, flattenIndices], flattenX.dtype, uniformData); + var reshaped = reshape({ inputs: { x: res }, backend: backend, attrs: { shape: resultShape } }); + backend.disposeData(flattenIndices.dataId); + backend.disposeData(flattenX.dataId); + backend.disposeData(res.dataId); + return reshaped; + } + var gatherNdConfig = { + kernelName: tf.GatherNd, + backendName: 'webgpu', + kernelFunc: gatherNd + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var GatherProgram = /** @class */ (function () { + function GatherProgram(aShape, outputShape) { + this.variableNames = ['A', 'indices']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = aShape.slice(); + this.aShape = aShape; + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "gather"; + } + GatherProgram.prototype.getUserCode = function () { + var sourceCoords = getSourceCoords$1(this.aShape); + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let resRC = getCoordsFromIndex(index);\n let indexZ = i32(getIndices(resRC.x, resRC.z));\n let inBounds = select(0.0, 1.0, indexZ >= 0 && indexZ < uniforms.aShape[2]);\n setOutputAtIndex(index, inBounds * getA(" + sourceCoords + "));\n }\n }\n "; + return userCode; + }; + return GatherProgram; + }()); + // The input and output are always flattened into rank 4 tensors. + function getSourceCoords$1(aShape) { + var currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w']; + var sourceCoords = []; + for (var i = 0; i < aShape.length; i++) { + if (i === 2) { + sourceCoords.push('indexZ'); + } + else { + sourceCoords.push("" + currentCoords[i]); + } + } + return sourceCoords.join(); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function gatherV2(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x, indices = inputs.indices; + var axis = attrs.axis, batchDims = attrs.batchDims; + // Unlike WebGL, WebGPU won't check if index is out of bound by calling + // backend.readSync() function in debug mode. 
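+ // Illustrative note (assuming tfjs-core's public gather API): because the
+ // GatherProgram above masks invalid indices via `inBounds * getA(...)`, an
+ // out-of-bounds index is expected to gather 0 rather than raise an error, e.g.
+ //   tf.gather(tf.tensor1d([10, 20, 30]), tf.tensor1d([1, 5], 'int32'))
+ //   // expected: [20, 0] on this backend; WebGL's debug-mode readSync()
+ //   // check would instead flag index 5 as out of bounds.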
+ var parsedAxis = tf.util.parseAxisParam(axis, x.shape)[0]; + var shapeInfo = tf.backend_util.segment_util.collectGatherOpShapeInfo(x, indices, parsedAxis, batchDims); + var indicesSize = tf.util.sizeFromShape(indices.shape); + var toDispose = []; + var flattenX = reshape({ + inputs: { x: x }, + backend: backend, + attrs: { + shape: [ + shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize, + shapeInfo.sliceSize + ] + } + }); + var flattenIndex = reshape({ + inputs: { x: indices }, + backend: backend, + attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] } + }); + toDispose.push(flattenX); + toDispose.push(flattenIndex); + var flattenOutputShape = [ + shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize, + shapeInfo.sliceSize + ]; + if (backend.shouldExecuteOnCPU([x, indices])) { + var indicesBufferInfo = backend.tensorMap.get(flattenIndex.dataId); + var indicesValues = indicesBufferInfo.values; + var indicesBuf = tf.buffer(flattenIndex.shape, flattenIndex.dtype, indicesValues); + var xBufferInfo = backend.tensorMap.get(flattenX.dataId); + var xValues = xBufferInfo.values; + var xBuf = tf.buffer(flattenX.shape, flattenX.dtype, xValues); + var outBuf = gatherV2ImplCPU(xBuf, indicesBuf, flattenOutputShape); + toDispose.forEach(function (t) { return backend.disposeData(t.dataId); }); + return backend.makeTensorInfo(shapeInfo.outputShape, outBuf.dtype, outBuf.values); + } + var program = new GatherProgram(flattenX.shape, flattenOutputShape); + var res = backend.runWebGPUProgram(program, [flattenX, flattenIndex], flattenX.dtype); + toDispose.push(res); + var reshaped = reshape({ inputs: { x: res }, backend: backend, attrs: { shape: shapeInfo.outputShape } }); + toDispose.forEach(function (t) { return backend.disposeData(t.dataId); }); + return reshaped; + } + var gatherV2Config = { + kernelName: tf.GatherV2, + backendName: 'webgpu', + kernelFunc: gatherV2 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var greater = binaryKernelFunc({ + opSnippet: BinaryOpType.GREATER, + cpuKernelImpl: greaterImplCPU, + dtype: 'bool', + }); + var greaterConfig = { + kernelName: tf.Greater, + backendName: 'webgpu', + kernelFunc: greater + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var greaterEqual = binaryKernelFunc({ + opSnippet: BinaryOpType.GREATER_EQUAL, + dtype: 'bool', + cpuKernelImpl: greaterEqualImplCPU + }); + var greaterEqualConfig = { + kernelName: tf.GreaterEqual, + backendName: 'webgpu', + kernelFunc: greaterEqual + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function leakyRelu(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var alpha = attrs.alpha; + var uniformData = [{ type: 'float32', data: [alpha] }]; + var program = new UnaryOpProgram(x.shape, UnaryOpType.LEAKYRELU); + program.uniforms = 'alpha : f32,'; + return backend.runWebGPUProgram(program, [x], 'float32', uniformData); + } + var leakyReluConfig = { + kernelName: tf.LeakyRelu, + backendName: 'webgpu', + kernelFunc: leakyRelu + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var less = binaryKernelFunc({ opSnippet: BinaryOpType.LESS, dtype: 'bool', cpuKernelImpl: lessImplCPU }); + var lessConfig = { + kernelName: tf.Less, + backendName: 'webgpu', + kernelFunc: less + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var lessEqual = binaryKernelFunc({ + opSnippet: BinaryOpType.LESS_EQUAL, + dtype: 'bool', + cpuKernelImpl: lessEqualImplCPU + }); + var lessEqualConfig = { + kernelName: tf.LessEqual, + backendName: 'webgpu', + kernelFunc: lessEqual + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var log = unaryKernelFunc({ opType: UnaryOpType.LOG, cpuKernelImpl: logImplCPU }); + var logConfig = { + kernelName: tf.Log, + backendName: 'webgpu', + kernelFunc: log + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var logicalAnd = binaryKernelFunc({ + opSnippet: BinaryOpType.LOGICAL_AND, + dtype: 'bool' + }); + var logicalAndConfig = { + kernelName: tf.LogicalAnd, + backendName: 'webgpu', + kernelFunc: logicalAnd + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var logicalNot = unaryKernelFunc({ opType: UnaryOpType.LOGICAL_NOT }); + var logicalNotConfig = { + kernelName: tf.LogicalNot, + backendName: 'webgpu', + kernelFunc: logicalNot + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function max(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var reductionIndices = attrs.reductionIndices, keepDims = attrs.keepDims; + return reduce(x, reductionIndices, keepDims, 'max', backend); + } + var maxConfig = { + kernelName: tf.Max, + backendName: 'webgpu', + kernelFunc: max + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var maximum = binaryKernelFunc({ + opSnippet: BinaryOpType.MAX, + cpuKernelImpl: maximumImplCPU, + }); + var maximumConfig = { + kernelName: tf.Maximum, + backendName: 'webgpu', + kernelFunc: maximum + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPool(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode; + var dilations = 1; + var convInfo = tf.backend_util.computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + var program; + var dimensions = []; + if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1) { + if (tf.util.arraysEqual(convInfo.inShape, convInfo.outShape)) { + return identity({ inputs: { x: x }, backend: backend }); + } + program = new PoolWithFilterSizeEqualsOneProgram(convInfo); + dimensions.push({ type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }); + } + else { + program = new Pool2DProgram(convInfo, 'max'); + dimensions.push({ type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth] }, { type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left] }, { + type: 'int32', + data: [convInfo.dilationHeight, convInfo.dilationWidth] + }, { type: 'int32', data: [convInfo.inHeight, convInfo.inWidth] }, { + type: 'int32', + data: [convInfo.effectiveFilterHeight, convInfo.effectiveFilterWidth] + }); + } + return backend.runWebGPUProgram(program, [x], x.dtype, dimensions); + } + var maxPoolConfig = { + kernelName: tf.MaxPool, + backendName: 'webgpu', + kernelFunc: maxPool + }; + + /** + * @license + * Copyright 2021 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function mean(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var keepDims = attrs.keepDims, axis = attrs.axis; + return reduce(x, axis, keepDims, 'mean', backend); + } + var meanConfig = { + kernelName: tf.Mean, + backendName: 'webgpu', + kernelFunc: mean + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function min(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis, keepDims = attrs.keepDims; + return reduce(x, axis, keepDims, 'min', backend); + } + var minConfig = { + kernelName: tf.Min, + backendName: 'webgpu', + kernelFunc: min + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var minimum = binaryKernelFunc({ + opSnippet: BinaryOpType.MIN, + cpuKernelImpl: minimumImplCPU, + }); + var minimumConfig = { + kernelName: tf.Minimum, + backendName: 'webgpu', + kernelFunc: minimum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var MirrorPadProgram = /** @class */ (function () {
+ function MirrorPadProgram(xShape, paddings, mode) {
+ var _this = this;
+ this.uniforms = '';
+ this.variableNames = ['x'];
+ this.workGroupSize = [64, 1, 1];
+ this.size = true;
+ this.outputShape = paddings.map(function (p, i) { return p[0] /* beforePad */ + xShape[i] + p[1]; } /* afterPad */);
+ this.dispatchLayout = flatDispatchLayout(this.outputShape);
+ this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize);
+ this.xShape = xShape;
+ paddings.map(function (_, i) {
+ _this.uniforms += " pad" + i + " : vec2<i32>,";
+ });
+ this.offset = mode === 'reflect' ? 0 : 1;
+ this.shaderKey = "mirrorPad_" + mode;
+ }
+ MirrorPadProgram.prototype.getUserCode = function () {
+ var rank = this.xShape.length;
+ // The length of paddings is the same as the rank of the input tensor.
+ var start = this.xShape.map(function (_, i) { return "uniforms.pad" + i + "[0]"; }).join(',');
+ var end = this.xShape
+ .map(function (_, i) { return "uniforms.pad" + i + "[0] + uniforms.xShape" + (rank > 1 ? "[" + i + "]" : ''); })
+ .join(',');
+ var shaderStart = rank === 1 ? 'start' : 'start[i]';
+ var shaderEnd = rank === 1 ? 'end' : 'end[i]';
+ var shaderOutC = rank === 1 ? 'outC' : 'outC[i]';
+ var dtype = getCoordsDataType(rank);
+ var unpackedCoords = rank > 1 ?
+ ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank) :
+ 'coords';
+ return "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let start = " + dtype + "(" + start + ");\n let end = " + dtype + "(" + end + ");\n var outC = getCoordsFromIndex(index);\n for (var i = 0; i < " + rank + "; i = i + 1) {\n if (" + shaderOutC + " < " + shaderStart + ") {\n " + shaderOutC + " = " + shaderStart + " * 2 - " + shaderOutC + " - " + this.offset + ";\n } else if(" + shaderOutC + " >= " + shaderEnd + ") {\n " + shaderOutC + " = (" + shaderEnd + " - 1) * 2 - " + shaderOutC + " + " + this.offset + ";\n }\n }\n let coords = outC - start;\n setOutputAtIndex(index, getX(" + unpackedCoords + "));\n }\n }\n ";
+ };
+ return MirrorPadProgram;
+ }());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============================================================================= + */ + var mirrorPadConfig = { + kernelName: tf.MirrorPad, + backendName: 'webgpu', + kernelFunc: function (_a) { + var inputs = _a.inputs, attrs = _a.attrs, backend = _a.backend; + var x = inputs.x; + var paddings = attrs.paddings, mode = attrs.mode; + var webGPUBackend = backend; + var uniformData = paddings.map(function (p) { + return { type: 'int32', data: [p[0], p[1]] }; + }); + var program = new MirrorPadProgram(x.shape, paddings, mode); + var output = webGPUBackend.runWebGPUProgram(program, [x], x.dtype, uniformData); + return output; + } + }; + + // This doesn't use unaryKernelFunc because negImplCPU is not of type + // SimpleUnaryKernelImplCPU. + function neg(args) { + var inputs = args.inputs, backend = args.backend; + var x = inputs.x; + if (backend.shouldExecuteOnCPU([x])) { + var xData = backend.tensorMap.get(x.dataId); + var _a = __read(negImplCPU(xData.values, x.shape, x.dtype), 2), outValues = _a[0], newShape = _a[1]; + return backend.makeTensorInfo(newShape, x.dtype, outValues); + } + var program = new UnaryOpProgram(x.shape, UnaryOpType.NEG); + return backend.runWebGPUProgram(program, [x], x.dtype); + } + var negConfig = { + kernelName: tf.Neg, + backendName: 'webgpu', + kernelFunc: neg + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function nonMaxSuppressionV3(args) { + console.warn('tf.nonMaxSuppression() in webgpu locks the UI thread. ' + + 'Call tf.nonMaxSuppressionAsync() instead'); + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var boxes = inputs.boxes, scores = inputs.scores; + var maxOutputSize = attrs.maxOutputSize, iouThreshold = attrs.iouThreshold, scoreThreshold = attrs.scoreThreshold; + var boxesVals = backend.readSync(boxes.dataId); + var scoresVals = backend.readSync(scores.dataId); + var selectedIndices = tf.kernel_impls.nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold).selectedIndices; + return backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)); + } + var nonMaxSuppressionV3Config = { + kernelName: tf.NonMaxSuppressionV3, + backendName: 'webgpu', + kernelFunc: nonMaxSuppressionV3 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function nonMaxSuppressionV5(args) { + console.warn('tf.nonMaxSuppression() in webgpu locks the UI thread. ' + + 'Call tf.nonMaxSuppressionAsync() instead'); + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var boxes = inputs.boxes, scores = inputs.scores; + var maxOutputSize = attrs.maxOutputSize, iouThreshold = attrs.iouThreshold, scoreThreshold = attrs.scoreThreshold, softNmsSigma = attrs.softNmsSigma; + var boxesVals = backend.readSync(boxes.dataId); + var scoresVals = backend.readSync(scores.dataId); + var maxOutputSizeVal = maxOutputSize; + var iouThresholdVal = iouThreshold; + var scoreThresholdVal = scoreThreshold; + var softNmsSigmaVal = softNmsSigma; + var _a = tf.kernel_impls.nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal), selectedIndices = _a.selectedIndices, selectedScores = _a.selectedScores; + return [ + backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)), + backend.makeTensorInfo([selectedScores.length], 'float32', new Float32Array(selectedScores)) + ]; + } + var nonMaxSuppressionV5Config = { + kernelName: tf.NonMaxSuppressionV5, + backendName: 'webgpu', + kernelFunc: nonMaxSuppressionV5 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function zerosLike(args) { + var inputs = args.inputs, backend = args.backend; + var x = inputs.x; + if (x.dtype === 'complex64') { + var realPart = real({ inputs: { input: x }, backend: backend }); + var r = zerosLike({ inputs: { x: realPart }, backend: backend }); + var imagPart = imag({ inputs: { input: x }, backend: backend }); + var i = zerosLike({ inputs: { x: imagPart }, backend: backend }); + var result = complex({ inputs: { real: r, imag: i }, backend: backend }); + backend.disposeData(realPart.dataId); + backend.disposeData(r.dataId); + backend.disposeData(imagPart.dataId); + backend.disposeData(i.dataId); + return result; + } + else { + return fill({ + attrs: { + shape: x.shape, + dtype: x.dtype, + value: x.dtype === 'string' ? '' : 0 + }, + backend: backend + }); + } + } + var zerosLikeConfig = { + kernelName: tf.ZerosLike, + backendName: 'webgpu', + kernelFunc: zerosLike + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function onesLike(args) { + var inputs = args.inputs, backend = args.backend; + var x = inputs.x; + if (x.dtype === 'string') { + throw new Error('onesLike is not supported under string dtype'); + } + else if (x.dtype === 'complex64') { + var realPart = real({ inputs: { input: x }, backend: backend }); + var r = onesLike({ inputs: { x: realPart }, backend: backend }); + var imagPart = imag({ inputs: { input: x }, backend: backend }); + var i = zerosLike({ inputs: { x: imagPart }, backend: backend }); + var result = complex({ inputs: { real: r, imag: i }, backend: backend }); + backend.disposeData(realPart.dataId); + backend.disposeData(r.dataId); + backend.disposeData(imagPart.dataId); + backend.disposeData(i.dataId); + return result; + } + else { + return fill({ attrs: { shape: x.shape, dtype: x.dtype, value: 1 }, backend: backend }); + } + } + var onesLikeConfig = { + kernelName: tf.OnesLike, + backendName: 'webgpu', + kernelFunc: onesLike + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function pack(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var axis = attrs.axis; + if (inputs.length === 1) { + return expandDims({ inputs: { input: inputs[0] }, backend: backend, attrs: { dim: axis } }); + } + var shape = inputs[0].shape; + var dtype = inputs[0].dtype; + inputs.forEach(function (t) { + tf.util.assertShapesMatch(shape, t.shape, 'All tensors passed to stack must have matching shapes'); + tf.util.assert(dtype === t.dtype, function () { return 'All tensors passed to stack must have matching dtypes'; }); + }); + var intermediateTensorInfos = []; + var expandedTensors = inputs.map(function (t) { + var expandedT = expandDims({ inputs: { input: t }, backend: backend, attrs: { dim: axis } }); + intermediateTensorInfos.push(expandedT); + return expandedT; + }); + var result = concat({ inputs: expandedTensors, backend: backend, attrs: { axis: axis } }); + intermediateTensorInfos.forEach(function (t) { return backend.disposeData(t.dataId); }); + return result; + } + var packConfig = { + kernelName: tf.Pack, + backendName: 'webgpu', + kernelFunc: pack + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var PadProgram = /** @class */ (function () { + function PadProgram(xShape, paddings) { + var _this = this; + this.variableNames = ['x']; + this.uniforms = 'constantValue : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = paddings.map(function (p, i) { return p[0] /* beforePad */ + xShape[i] + p[1]; } /* afterPad */); + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + paddings.map(function (_, i) { + _this.uniforms += " pad" + i + " : vec2,"; + }); + this.xShape = xShape; + this.shaderKey = 'pad'; + } + PadProgram.prototype.getUserCode = function () { + var rank = this.xShape.length; + var type = getCoordsDataType(rank); + // The length of paddings are same with the rank of the input tensor. + var start = this.xShape.map(function (_, i) { return "uniforms.pad" + i + "[0]"; }).join(','); + var end = this.xShape + .map(function (_, i) { return "uniforms.pad" + i + "[0] + uniforms.xShape" + (rank > 1 ? "[" + i + "]" : ''); }) + .join(','); + var startValue = rank > 1 ? type + "(" + start + ")" : "" + start; + var endValue = rank > 1 ? type + "(" + end + ")" : "" + end; + var leftPadCondition = rank > 1 ? "any(outC < start)" : "outC < start"; + var rightPadCondition = rank > 1 ? "any(outC >= end)" : "outC >= end"; + var unpackedCoords = rank > 1 ? + ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank) : + 'coords'; + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let start = " + startValue + ";\n let end = " + endValue + ";\n let outC = getCoordsFromIndex(index);\n\n if (" + leftPadCondition + " || " + rightPadCondition + ") {\n setOutputAtIndex(index, uniforms.constantValue);\n } else {\n let coords = outC - start;\n setOutputAtIndex(index, getX(" + unpackedCoords + "));\n }\n }\n }\n "; + return userCode; + }; + return PadProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var padV2 = function (args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var paddings = attrs.paddings, constantValue = attrs.constantValue; + if (paddings.every(function (p) { return tf.util.arraysEqual(p, [0, 0]); })) { + return identity({ inputs: { x: x }, backend: backend }); + } + if (tf.util.sizeFromShape(x.shape) === 0) { + // Short-circuit the computation, since x doesn't have value, only + // the shape is used to compute output shape to pad. + var outputShape = paddings.map(function (p, i) { return p[0] /* beforePad */ + x.shape[i] + p[1]; } /* afterPad */); + return fill({ + backend: backend, + attrs: { shape: outputShape, value: constantValue, dtype: x.dtype } + }); + } + var uniformData = [{ type: 'float32', data: [constantValue] }]; + paddings.map(function (p) { return uniformData.push({ type: 'int32', data: [p[0], p[1]] }); }); + var program = new PadProgram(x.shape, paddings); + return backend.runWebGPUProgram(program, [x], x.dtype, uniformData); + }; + var padV2Config = { + kernelName: tf.PadV2, + backendName: 'webgpu', + kernelFunc: padV2 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var pow = binaryKernelFunc({ + opSnippet: BinaryOpType.POW, + }); + var powConfig = { + kernelName: tf.Pow, + backendName: 'webgpu', + kernelFunc: pow + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function prelu(args) { + var inputs = args.inputs, backend = args.backend; + var x = inputs.x, alpha = inputs.alpha; + var program = new BinaryOpProgram(BinaryOpType.PRELU, x.shape, alpha.shape); + return backend.runWebGPUProgram(program, [x, alpha], 'float32'); + } + var preluConfig = { + kernelName: tf.Prelu, + backendName: 'webgpu', + kernelFunc: prelu + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function prod(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var axis = attrs.axis, keepDims = attrs.keepDims; + return reduce(x, axis, keepDims, 'prod', backend); + } + var prodConfig = { + kernelName: tf.Prod, + backendName: 'webgpu', + kernelFunc: prod + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var range = function (args) { + var backend = args.backend, attrs = args.attrs; + var start = attrs.start, stop = attrs.stop, step = attrs.step, dtype = attrs.dtype; + var values = rangeImplCPU(start, stop, step, dtype); + return backend.makeTensorInfo([values.length], dtype, values); + }; + var rangeConfig = { + kernelName: tf.Range, + backendName: 'webgpu', + kernelFunc: range + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var realDiv = binaryKernelFunc({ opSnippet: BinaryOpType.DIV }); + var realDivConfig = { + kernelName: tf.RealDiv, + backendName: 'webgpu', + kernelFunc: realDiv + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var relu = unaryKernelFunc({ opType: UnaryOpType.RELU }); + var reluConfig = { + kernelName: tf.Relu, + backendName: 'webgpu', + kernelFunc: relu + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var relu6 = unaryKernelFunc({ opType: UnaryOpType.RELU6 }); + var relu6Config = { + kernelName: tf.Relu6, + backendName: 'webgpu', + kernelFunc: relu6 + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ResizeBilinearProgram = /** @class */ (function () { + function ResizeBilinearProgram(inputShape, newHeight, newWidth) { + this.variableNames = ['x']; + this.uniforms = 'adjustHeightWidth : vec2, halfPixelCenters : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = [inputShape[0], newHeight, newWidth, inputShape[3]]; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = "resizeBilinear"; + } + ResizeBilinearProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let b = coords[0];\n let d = coords[3];\n let rc = coords.yz;\n\n let effectiveInSize = vec2(\n f32(uniforms.xShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.xShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveOutSize = vec2(\n f32(uniforms.outShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.outShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveInputOverOutputRatioRC =\n effectiveInSize / effectiveOutSize;\n\n // Fractional source index\n let sourceFracIndexRC =\n (vec2(rc) + vec2(uniforms.halfPixelCenters)) *\n effectiveInputOverOutputRatioRC - vec2(uniforms.halfPixelCenters);\n\n // Compute the four integer indices.\n let sourceFloorRC = vec2(sourceFracIndexRC);\n let sourceCeilRC = vec2(\n min(vec2(uniforms.xShape.yz) - vec2(1.0), ceil(sourceFracIndexRC)));\n\n let topLeft = getX(b, sourceFloorRC.x, sourceFloorRC.y, d);\n let bottomLeft = getX(b, sourceCeilRC.x, sourceFloorRC.y, d);\n let topRight = 
getX(b, sourceFloorRC.x, sourceCeilRC.y, d);\n let bottomRight = getX(b, sourceCeilRC.x, sourceCeilRC.y, d);\n\n let fracRC = sourceFracIndexRC - vec2(sourceFloorRC);\n\n let top = topLeft + (topRight - topLeft) * fracRC.y;\n let bottom = bottomLeft + (bottomRight - bottomLeft) * fracRC.y;\n let newValue = top + (bottom - top) * fracRC.x;\n\n setOutputAtIndex(index, newValue);\n }\n }\n "; + return userCode; + }; + return ResizeBilinearProgram; + }()); + + function resizeBilinear(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var images = inputs.images; + var alignCorners = attrs.alignCorners, size = attrs.size, halfPixelCenters = attrs.halfPixelCenters; + var _a = __read(size, 2), newHeight = _a[0], newWidth = _a[1]; + var adjustHeight = alignCorners && newHeight > 1 ? 1.0 : 0.0; + var adjustWidth = alignCorners && newWidth > 1 ? 1.0 : 0.0; + var halfPixelCentersValue = halfPixelCenters ? 0.5 : 0.0; + var uniformData = [ + { type: 'float32', data: [adjustHeight, adjustWidth] }, + { type: 'float32', data: [halfPixelCentersValue] } + ]; + var program = new ResizeBilinearProgram(images.shape, newHeight, newWidth); + return backend.runWebGPUProgram(program, [images], 'float32', uniformData); + } + var resizeBilinearConfig = { + kernelName: tf.ResizeBilinear, + backendName: 'webgpu', + kernelFunc: resizeBilinear + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var ResizeNearestNeighborProgram = /** @class */ (function () { + function ResizeNearestNeighborProgram(inputShape, newHeight, newWidth, halfPixelCenters) { + this.variableNames = ['x']; + this.uniforms = 'adjustHeightWidth : vec2, roundBase : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = [inputShape[0], newHeight, newWidth, inputShape[3]]; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.halfPixelCenters = halfPixelCenters; + this.shaderKey = "resizeNearest_" + halfPixelCenters; + } + ResizeNearestNeighborProgram.prototype.getUserCode = function () { + var sourceFracIndexRC; + if (this.halfPixelCenters) { + sourceFracIndexRC = + "max((vec2(rc) + vec2(0.5)) * effectiveInputOverOutputRatioRC" + + ", vec2(0.0))"; + } + else { + sourceFracIndexRC = "vec2(rc) * effectiveInputOverOutputRatioRC"; + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let b = coords[0];\n let d = coords[3];\n let rc = coords.yz;\n\n let effectiveInSize = vec2(\n f32(uniforms.xShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.xShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveOutSize = vec2(\n f32(uniforms.outShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.outShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveInputOverOutputRatioRC =\n effectiveInSize / effectiveOutSize;\n\n // Fractional source index\n let sourceFracIndexRC = " + sourceFracIndexRC + ";\n\n // Compute the coordinators of nearest neighbor point.\n let inputShapeRC = vec2(f32(uniforms.xShape.y), f32(uniforms.xShape.z));\n let sourceNearestRC = vec2(\n min(inputShapeRC - 1.0, floor(sourceFracIndexRC + uniforms.roundBase)));\n let newValue = getX(b, sourceNearestRC.x, sourceNearestRC.y, d);\n\n setOutputAtIndex(index, newValue);\n }\n }\n "; + return userCode; + }; + return ResizeNearestNeighborProgram; + }()); + + function resizeNearestNeighbor(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var images = inputs.images; + var alignCorners = attrs.alignCorners, halfPixelCenters = attrs.halfPixelCenters, size = attrs.size; + var _a = __read(size, 2), newHeight = _a[0], newWidth = _a[1]; + var adjustHeight = alignCorners && newHeight > 1 ? 1.0 : 0.0; + var adjustWidth = alignCorners && newWidth > 1 ? 1.0 : 0.0; + // When align corners is false, we rounds the value with floor. + var roundBase = alignCorners ? 0.5 : 0.0; + var uniformData = [ + { type: 'float32', data: [adjustHeight, adjustWidth] }, + { type: 'float32', data: [roundBase] } + ]; + var program = new ResizeNearestNeighborProgram(images.shape, newHeight, newWidth, halfPixelCenters); + return backend.runWebGPUProgram(program, [images], images.dtype, uniformData); + } + var resizeNearestNeighborConfig = { + kernelName: tf.ResizeNearestNeighbor, + backendName: 'webgpu', + kernelFunc: resizeNearestNeighbor + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var RotateProgram = /** @class */ (function () { + function RotateProgram(imageShape, fillValue) { + this.outputShape = []; + this.variableNames = ['x']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = imageShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.uniforms = "centerX : f32, centerY : f32, sinRadians : f32,\n cosRadians : f32,"; + this.shaderKey = 'rotate'; + this.outputShape = imageShape; + if (typeof fillValue === 'number') { + this.uniforms += " fillValue : f32,"; + this.fillSnippet = "var outputValue = uniforms.fillValue;"; + this.shaderKey += '_float'; + } + else { + this.uniforms += " fillValue : vec3,"; + this.fillSnippet = "var outputValue = uniforms.fillValue[coords[3]];"; + this.shaderKey += '_vec3'; + } + } + RotateProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let coordXFloat = (f32(coords[2]) - uniforms.centerX) *\n uniforms.cosRadians - (f32(coords[1]) - uniforms.centerY) *\n uniforms.sinRadians;\n let coordYFloat = (f32(coords[2]) - uniforms.centerX) *\n uniforms.sinRadians + (f32(coords[1]) - uniforms.centerY) *\n uniforms.cosRadians;\n let coordX = i32(round(coordXFloat + uniforms.centerX));\n let coordY = i32(round(coordYFloat + uniforms.centerY));\n " + this.fillSnippet + "\n if(coordX >= 0 && coordX < uniforms.xShape[2] && coordY >= 0 &&\n coordY < uniforms.xShape[1]) {\n outputValue = getX(coords[0], coordY, coordX, coords[3]);\n }\n setOutputAtIndex(index, outputValue);\n }\n }\n "; + return userCode; + }; + return RotateProgram; + }()); + + var rotateWithOffsetConfig = { + kernelName: tf.RotateWithOffset, + backendName: 'webgpu', + kernelFunc: function (_a) { + var inputs = _a.inputs, attrs = _a.attrs, backend = _a.backend; + var image = inputs.image; + var radians = attrs.radians, fillValue = attrs.fillValue, center = attrs.center; + var webgpuBackend = backend; + var program = new RotateProgram(image.shape, fillValue); + var _b = __read(tf.backend_util.getImageCenter(center, image.shape[1], image.shape[2]), 2), centerX = _b[0], centerY = _b[1]; + var uniformData = [ + { type: 'float32', data: [centerX] }, + { type: 'float32', data: [centerY] }, + { type: 'float32', data: [Math.sin(radians)] }, + { type: 'float32', data: [Math.cos(radians)] } + ]; + if (typeof fillValue === 'number') { + uniformData.push({ type: 'float32', data: [Number.parseFloat(fillValue.toFixed(2))] }); + } + else { + uniformData.push({ type: 'float32', data: fillValue }); + } + var output = webgpuBackend.runWebGPUProgram(program, [image], image.dtype, uniformData); + return output; + } + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var rsqrt = unaryKernelFunc({ opType: UnaryOpType.RSQRT, cpuKernelImpl: rsqrtImplCPU }); + var rsqrtConfig = { + kernelName: tf.Rsqrt, + backendName: 'webgpu', + kernelFunc: rsqrt + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ScatterOptimizedProgram = /** @class */ (function () { + function ScatterOptimizedProgram(flattenXShape, sliceDim, indicesRank, updatesRank, strides, shape, outputDtype) { + this.variableNames = ['updates', 'indices']; + this.workGroupSize = [64, 1, 1]; + this.atomic = true; + this.outputShape = shape; + this.type = outputDtype; + this.dispatchLayout = flatDispatchLayout(flattenXShape); + // Dispatching based on |updates| shape instead of output shape. + this.dispatch = + computeDispatch(this.dispatchLayout, flattenXShape, this.workGroupSize); + this.sliceDimGreaterThanOne = sliceDim > 1; + this.shaderKey = "scatter_" + indicesRank + "_" + updatesRank + "_" + this.sliceDimGreaterThanOne + "_" + outputDtype; + var stridesType = getCoordsDataType(strides.length); + this.uniforms = "sliceDim : i32, strides: " + stridesType + ", size: i32,"; + this.updatesRank = updatesRank; + this.indicesRank = indicesRank; + } + ScatterOptimizedProgram.prototype.getUserCode = function () { + var indicesString = ''; + if (this.indicesRank === 1) { + indicesString = 'coords[0]'; + } + else if (this.indicesRank === 2) { + indicesString = 'coords[0], j'; + } + var indicesSnippet = "getIndices(" + indicesString + ")"; + var strideString = this.sliceDimGreaterThanOne ? 'uniforms.strides[j]' : + 'uniforms.strides'; + var updatesString = ''; + var outCoordsString = ''; + var getUpdatesCoordsFromFlatIndex = ''; + if (this.updatesRank === 1) { + updatesString = 'coords[0]'; + outCoordsString = 'flattenedIndex'; + getUpdatesCoordsFromFlatIndex = "\n fn getUpdatesCoordsFromFlatIndex(index : i32) -> i32 {\n return index;\n }\n "; + } + else if (this.updatesRank === 2) { + updatesString = 'coords[0], coords[1]'; + outCoordsString = 'vec2(flattenedIndex, coords[1])'; + getUpdatesCoordsFromFlatIndex = "\n fn getUpdatesCoordsFromFlatIndex(index : i32) -> vec2 {\n let d0 = index / uniforms.updatesShape[1];\n let d1 = index - d0 * uniforms.updatesShape[1];\n return vec2(d0, d1);\n }\n "; + } + var updatesSnippet = "getUpdates(" + updatesString + ")"; + // atomicAdd only supports uint/int type. 
For float, we use + // atomicCompareExchangeWeak to simulate. + var atomicAddSnippet = this.type === 'int32' ? + "atomicAdd(&(result[flatIndex]), i32(updateValue));" : + "\n var assumed = atomicLoad(&(result[flatIndex]));\n var success = 0;\n for (; success == 0;) {\n let new = bitcast(assumed) + updateValue;\n let newI32 = bitcast(new);\n let resValue = atomicCompareExchangeWeak(&(result[flatIndex]), assumed, newI32);\n assumed = resValue[0];\n success = resValue[1];\n }\n "; + var userCode = "\n " + getUpdatesCoordsFromFlatIndex + "\n\n " + getMainHeaderAndGlobalIndexString() + "\n\n if (index < uniforms.size) {\n let coords = getUpdatesCoordsFromFlatIndex(index);\n var flattenedIndex = 0;\n for (var j = 0; j < uniforms.sliceDim; j = j + 1) {\n let indexInside = i32(round(" + indicesSnippet + "));\n flattenedIndex = flattenedIndex + indexInside * " + strideString + ";\n }\n let updateValue = " + updatesSnippet + ";\n let flatIndex = getOutputIndexFromCoords(" + outCoordsString + ");\n\n " + atomicAddSnippet + "\n }\n }"; + return userCode; + }; + return ScatterOptimizedProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function scatterNd(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var indices = inputs.indices, updates = inputs.updates; + var shape = attrs.shape; + var _a = tf.backend_util.calculateShapes(updates, indices, shape), sliceRank = _a.sliceRank, numUpdates = _a.numUpdates, sliceSize = _a.sliceSize, strides = _a.strides, outputSize = _a.outputSize; + var flattenShape = [outputSize / sliceSize, sliceSize]; + if (outputSize === 0) { + return backend.makeTensorInfo(shape, indices.dtype); + } + var flattenIndices = reshape({ inputs: { x: indices }, backend: backend, attrs: { shape: [numUpdates, sliceRank] } }); + var flattenX = reshape({ inputs: { x: updates }, backend: backend, attrs: { shape: [numUpdates, sliceSize] } }); + var type = flattenX.dtype; + var output = fill({ backend: backend, attrs: { shape: flattenShape, value: 0, dtype: type } }); + var size = tf.util.sizeFromShape(flattenX.shape); + var uniformData = [ + { type: 'int32', data: [sliceRank] }, { type: 'int32', data: strides }, + { type: 'int32', data: [size] } + ]; + var program = new ScatterOptimizedProgram(flattenX.shape, sliceRank, flattenIndices.shape.length, flattenX.shape.length, strides, flattenShape, type); + var res = backend.runWebGPUProgram(program, [flattenX, flattenIndices], type, uniformData, output); + var reshaped = reshape({ inputs: { x: res }, backend: backend, attrs: { shape: shape } }); + backend.disposeData(flattenIndices.dataId); + backend.disposeData(flattenX.dataId); + backend.disposeData(res.dataId); + return reshaped; + } + var scatterNdConfig = { + kernelName: tf.ScatterNd, + backendName: 'webgpu', + kernelFunc: scatterNd + }; + + /** + * @license + * Copyright 2019 Google 
LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var SelectProgram = /** @class */ (function () { + function SelectProgram(cRank, shape, rank) { + this.variableNames = ['c', 'a', 'b']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.cRank = cRank; + this.rank = rank; + this.shaderKey = 'select'; + } + SelectProgram.prototype.getUserCode = function () { + // TODO(WGSL): below code can be merged with getUserCode. + var cCoords; + var abCoords; + if (this.rank > 4) { + throw Error("Where for rank " + this.rank + " is not yet supported"); + } + if (this.rank === 1) { + abCoords = "resRC"; + cCoords = "resRC"; + } + else { + var currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w']; + var cCoordVars = []; + var abCoordVars = []; + for (var i = 0; i < this.outputShape.length; i++) { + abCoordVars.push("" + currentCoords[i]); + if (i < this.cRank) { + cCoordVars.push("" + currentCoords[i]); + } + } + cCoords = cCoordVars.join(); + abCoords = abCoordVars.join(); + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let resRC = getCoordsFromIndex(index);\n let cVal = getC(" + cCoords + ");\n if (cVal >= 1.0) {\n setOutputAtIndex(index, getA(" + abCoords + "));\n } else {\n setOutputAtIndex(index, getB(" + abCoords + "));\n }\n }\n }\n "; + return userCode; + }; + return SelectProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function select(args) { + var inputs = args.inputs, backend = args.backend; + var condition = inputs.condition, t = inputs.t, e = inputs.e; + var program = new SelectProgram(condition.shape.length, t.shape, t.shape.length); + return backend.runWebGPUProgram(program, [condition, t, e], tf.upcastType(t.dtype, e.dtype)); + } + var selectConfig = { + kernelName: tf.Select, + backendName: 'webgpu', + kernelFunc: select + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var sigmoid = unaryKernelFunc({ opType: UnaryOpType.SIGMOID }); + var sigmoidConfig = { + kernelName: tf.Sigmoid, + backendName: 'webgpu', + kernelFunc: sigmoid, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var sin = unaryKernelFunc({ opType: UnaryOpType.SIN }); + var sinConfig = { + kernelName: tf.Sin, + backendName: 'webgpu', + kernelFunc: sin + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var sinh = unaryKernelFunc({ opType: UnaryOpType.SINH }); + var sinhConfig = { + kernelName: tf.Sinh, + backendName: 'webgpu', + kernelFunc: sinh + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var sub = binaryKernelFunc({ + opSnippet: BinaryOpType.SUB, + cpuKernelImpl: subImplCPU, + supportsComplex: true + }); + var subConfig = { + kernelName: tf.Sub, + backendName: 'webgpu', + kernelFunc: sub + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function softmax(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var logits = inputs.logits; + var dim = attrs.dim; + var axes = tf.util.parseAxisParam([dim], logits.shape); + var maxLogit = max({ + inputs: { x: logits }, + backend: backend, + attrs: { reductionIndices: axes, keepDims: false } + }); + var expandedShape = tf.backend_util.expandShapeToKeepDim(maxLogit.shape, axes); + var maxLogitsReshaped = reshape({ inputs: { x: maxLogit }, backend: backend, attrs: { shape: expandedShape } }); + var a = sub({ inputs: { a: logits, b: maxLogitsReshaped }, backend: backend }); + var b = exp({ inputs: { x: a }, backend: backend }); + var sumExp = sum({ inputs: { x: b }, backend: backend, attrs: { axis: axes, keepDims: false } }); + var sumExpReshaped = reshape({ inputs: { x: sumExp }, backend: backend, attrs: { shape: expandedShape } }); + var res = realDiv({ inputs: { a: b, b: sumExpReshaped }, backend: backend }); + backend.disposeData(maxLogit.dataId); + backend.disposeData(maxLogitsReshaped.dataId); + backend.disposeData(a.dataId); + backend.disposeData(b.dataId); + backend.disposeData(sumExp.dataId); + backend.disposeData(sumExpReshaped.dataId); + return res; + } + var softmaxConfig = { + kernelName: tf.Softmax, + backendName: 'webgpu', + kernelFunc: softmax + }; + + var spaceToBatchND = function (args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var blockShape = attrs.blockShape, paddings = attrs.paddings; + tf.util.assert(x.shape.length <= 4, function () { return 'spaceToBatchND for rank > 4 with a WebGPU backend not ' + + 'implemented yet'; }); + var prod = blockShape.reduce(function (a, b) { return a * b; }); + var completePaddings = [[0, 0]]; + completePaddings.push.apply(completePaddings, __spread(paddings)); + for (var i = 1 + blockShape.length; i < x.shape.length; ++i) { + completePaddings.push([0, 0]); + } + var toDispose = []; + var paddedX = padV2({ + inputs: { x: x }, + backend: backend, + attrs: { paddings: completePaddings, constantValue: 0 } + }); + var reshapedPaddedShape = tf.backend_util.getReshaped(paddedX.shape, blockShape, prod, false); + var permutedReshapedPaddedPermutation = tf.backend_util.getPermuted(reshapedPaddedShape.length, blockShape.length, false); + var flattenShape = tf.backend_util.getReshapedPermuted(paddedX.shape, blockShape, prod, false); + var reshapedPaddedX = reshape({ inputs: { x: paddedX }, backend: backend, attrs: { shape: reshapedPaddedShape } }); + var paddedXT = transpose({ + inputs: { x: reshapedPaddedX }, + backend: backend, + attrs: { perm: permutedReshapedPaddedPermutation } + }); + var result = reshape({ inputs: { x: paddedXT }, backend: backend, attrs: { shape: flattenShape } }); + toDispose.push(paddedX); + toDispose.push(reshapedPaddedX); + toDispose.push(paddedXT); + toDispose.forEach(function (t) { return 
backend.disposeData(t.dataId); }); + return result; + }; + var spaceToBatchNDConfig = { + kernelName: tf.SpaceToBatchND, + backendName: 'webgpu', + kernelFunc: spaceToBatchND + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var ScatterProgram = /** @class */ (function () { + function ScatterProgram(updateSize, sliceDim, indicesRank, updatesRank, strides, shape, summingDupeIndex) { + this.variableNames = ['updates', 'indices', 'defaultValue']; + this.workGroupSize = [64, 1, 1]; + this.workPerThread = 4; + this.size = true; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + var sliceDimGreaterThanOne = sliceDim > 1; + this.shaderKey = + "scatter_" + indicesRank + "_" + updatesRank + "_" + sliceDimGreaterThanOne; + var stridesType = getCoordsDataType(strides.length); + this.uniforms = + "updateSize : i32, sliceDim : i32, strides: " + stridesType + ","; + var indicesString = ''; + if (indicesRank === 1) { + indicesString = 'i'; + } + else if (indicesRank === 2) { + indicesString = 'i, j'; + } + this.indicesSnippet = "getIndices(" + indicesString + ")"; + var updatesString = ''; + if (updatesRank === 1) { + updatesString = 'i'; + } + else if (updatesRank === 2) { + updatesString = 'i, coords[1]'; + } + this.updatesSnippet = "getUpdates(" + updatesString + ")"; + this.strideString = + sliceDimGreaterThanOne ? 'uniforms.strides[j]' : 'uniforms.strides'; + } + ScatterProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n\n let globalIndex = index * " + this.workPerThread + ";\n if (globalIndex < uniforms.size) {\n var sum = vec4(0.0);\n var found = vec4(false);\n for (var i = 0; i < uniforms.updateSize; i = i + 1) {\n var flattenedIndex = 0;\n for (var j = 0; j < uniforms.sliceDim; j = j + 1) {\n let indexInside = i32(round(" + this.indicesSnippet + "));\n flattenedIndex = flattenedIndex + indexInside * " + this.strideString + ";\n }\n for (var innerIndex = 0; innerIndex < " + this.workPerThread + "; innerIndex = innerIndex + 1) {\n let curIndex = globalIndex + innerIndex;\n let coords = getCoordsFromIndex(curIndex);\n if (flattenedIndex == coords[0]) {\n sum[innerIndex] = sum[innerIndex] + " + this.updatesSnippet + ";\n found[innerIndex] = true;\n }\n }\n }\n for (var innerIndex = 0; innerIndex < " + this.workPerThread + "; innerIndex = innerIndex + 1) {\n let curIndex = globalIndex + innerIndex;\n if (curIndex < uniforms.size)\n {\n setOutputAtIndex(curIndex, mix(getDefaultValue(), sum[innerIndex], f32(found[innerIndex])));\n }\n }\n }\n }"; + return userCode; + }; + return ScatterProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseToDense(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var sparseIndices = inputs.sparseIndices, sparseValues = inputs.sparseValues, defaultValue = inputs.defaultValue; + var outputShape = attrs.outputShape; + var _a = tf.backend_util.calculateShapes(sparseValues, sparseIndices, outputShape), sliceRank = _a.sliceRank, numUpdates = _a.numUpdates, strides = _a.strides, outputSize = _a.outputSize; + var sumDupeIndices = false; + var uniformData = [ + { type: 'int32', data: [numUpdates] }, + { type: 'int32', data: [sliceRank] }, + { type: 'int32', data: strides }, + ]; + var program = new ScatterProgram(numUpdates, sliceRank, sparseIndices.shape.length, sparseValues.shape.length, strides, [outputSize, 1], sumDupeIndices); + var res = backend.runWebGPUProgram(program, [sparseValues, sparseIndices, defaultValue], sparseValues.dtype, uniformData); + var reshaped = reshape({ inputs: { x: res }, backend: backend, attrs: { shape: outputShape } }); + backend.disposeData(res.dataId); + return reshaped; + } + var sparseToDenseConfig = { + kernelName: tf.SparseToDense, + backendName: 'webgpu', + kernelFunc: sparseToDense + }; + + function splitV(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var numOrSizeSplits = attrs.numOrSizeSplits, axis = attrs.axis; + var $axis = tf.util.parseAxisParam(axis, x.shape)[0]; + var splitSizes = tf.backend_util.prepareSplitSize(x, numOrSizeSplits, $axis); + var xRank = x.shape.length; + var begin = new Array(xRank).fill(0); + var size = x.shape.slice(); + return splitSizes.map(function (s) { + var sliceSize = __spread(size); + sliceSize[$axis] = s; + var sliceT = slice({ inputs: { x: x }, backend: backend, attrs: { begin: begin, size: sliceSize } }); + begin[$axis] += s; + return sliceT; + }); + } + var splitVConfig = { + kernelName: tf.SplitV, + backendName: 'webgpu', + kernelFunc: splitV + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var sqrt = unaryKernelFunc({ opType: UnaryOpType.SQRT }); + var sqrtConfig = { + kernelName: tf.Sqrt, + backendName: 'webgpu', + kernelFunc: sqrt + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var squareConfig = { + kernelName: tf.Square, + backendName: 'webgpu', + kernelFunc: function (_a) { + var inputs = _a.inputs, backend = _a.backend; + var x = inputs.x; + var webGPUBackend = backend; + var program = new UnaryOpProgram(x.shape, UnaryOpType.SQUARE); + return webGPUBackend.runWebGPUProgram(program, [x], x.dtype); + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var squaredDifference = binaryKernelFunc({ + opSnippet: BinaryOpType.SQUARED_DIFFERENCE, + }); + var squaredDifferenceConfig = { + kernelName: tf.SquaredDifference, + backendName: 'webgpu', + kernelFunc: squaredDifference + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var StridedSliceProgram = /** @class */ (function () { + function StridedSliceProgram(destSize) { + this.variableNames = ['x']; + // TODO(xing.xu): Increase the workPerThread. 
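+            // [Editorial sketch, not part of the upstream tfjs build] workPerThread
+            // is the number of output elements each shader invocation handles;
+            // computeDispatch roughly divides the flat output size by
+            // workGroupSize[0] * workPerThread. With assumed values:
+            //   size = 1_000_000, workGroupSize = [64, 1, 1], workPerThread = 1
+            //   => ceil(1_000_000 / 64) = 15625 workgroups on the x axis.
+            // Raising workPerThread would shrink the dispatch at the cost of a
+            // longer per-invocation loop, as ScatterProgram above does with 4.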
+ this.workPerThread = 1; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = destSize; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize, [this.workPerThread, 1, 1]); + var dtype = getCoordsDataType(this.outputShape.length); + this.uniforms = "begin : " + dtype + ", strides : " + dtype + ", "; + this.shaderKey = 'stridedSlice'; + } + StridedSliceProgram.prototype.getUserCode = function () { + var _this = this; + var rank = this.outputShape.length; + var newCoords = ''; + if (rank === 1) { + newCoords = 'coords * uniforms.strides + uniforms.begin'; + } + else { + var outputAxis_1 = 0; + newCoords = + this.outputShape + .map(function (_, i) { + outputAxis_1++; + return _this.outputShape.length === 1 ? + "coords * uniforms.strides[" + i + "] + uniforms.begin[" + i + "]" : + "coords[" + (outputAxis_1 - 1) + "] * uniforms.strides[" + i + "] + uniforms.begin[" + i + "]"; + }) + .join(','); + } + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n setOutputAtIndex(index, getX(" + newCoords + "));\n }\n }\n "; + return userCode; + }; + return StridedSliceProgram; + }()); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function stridedSlice(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var begin = attrs.begin, end = attrs.end, strides = attrs.strides, beginMask = attrs.beginMask, endMask = attrs.endMask, ellipsisMask = attrs.ellipsisMask, newAxisMask = attrs.newAxisMask, shrinkAxisMask = attrs.shrinkAxisMask; + var _a = tf.slice_util.sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask), finalShapeSparse = _a.finalShapeSparse, finalShape = _a.finalShape, isIdentity = _a.isIdentity, sliceDim0 = _a.sliceDim0, isSimpleSlice = _a.isSimpleSlice, $begin = _a.begin, $end = _a.end, $strides = _a.strides; + var result; + if (isIdentity) { + // Optimization #1, slice is a no-op plus reshape + result = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: finalShape } }); + } + else if (sliceDim0 || isSimpleSlice) { + // Optimization #2, slice is memory contiguous (only occurs in dim 0) + tf.util.assert(x.shape.length >= 1, function () { return "Input must have rank at least 1, got: " + x.shape.length; }); + var size = tf.slice_util.computeOutShape($begin, $end, $strides); + // To tolerate begin[0] > end[0] (a 0-output slice), we min(begin, end). 
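+            // [Editorial note, illustrative only - not from the upstream build]
+            // Example of this memory-contiguous branch, with assumed values:
+            //   x.shape = [8, 4], begin = [2, 0], end = [6, 4], strides = [1, 1]
+            //   => computeOutShape gives size = [4, 4], so the result is a plain
+            //      slice({begin: [2, 0], size: [4, 4]}) followed by a reshape to
+            //      finalShape, and the StridedSliceProgram shader is never built.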
+ var sliced = slice({ inputs: { x: x }, backend: backend, attrs: { begin: $begin, size: size } }); + result = + reshape({ inputs: { x: sliced }, backend: backend, attrs: { shape: finalShape } }); + backend.disposeData(sliced.dataId); + } + else { + var shouldExecuteOnCPU = backend.shouldExecuteOnCPU([x]); + if (shouldExecuteOnCPU) { + var values = backend.readSync(x.dataId); + var xBuf = tf.buffer(x.shape, x.dtype, values); + var resultValues = stridedSliceImplCPU(finalShapeSparse, xBuf, $strides, $begin); + result = backend.makeTensorInfo(finalShape, x.dtype, resultValues.values); + } + else { + var program = new StridedSliceProgram(finalShapeSparse); + var uniformData = [{ type: 'int32', data: $begin }, { type: 'int32', data: $strides }]; + var resultValues = backend.runWebGPUProgram(program, [x], x.dtype, uniformData); + result = reshape({ inputs: { x: resultValues }, backend: backend, attrs: { shape: finalShape } }); + backend.disposeData(resultValues.dataId); + } + } + return result; + } + var stridedSliceConfig = { + kernelName: tf.StridedSlice, + backendName: 'webgpu', + kernelFunc: stridedSlice + }; + + function stringNGrams(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var separator = attrs.separator, nGramWidths = attrs.nGramWidths, leftPad = attrs.leftPad, rightPad = attrs.rightPad, padWidth = attrs.padWidth, preserveShortSequences = attrs.preserveShortSequences; + var data = inputs.data, dataSplits = inputs.dataSplits; + var $data = backend.readSync(data.dataId); + var $dataSplits = backend.readSync(dataSplits.dataId); + var _a = __read(stringNGramsImplCPU($data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences), 2), nGrams = _a[0], nGramsSplits = _a[1]; + return [ + backend.makeTensorInfo([nGrams.length], 'string', nGrams), + backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits), + ]; + } + var stringNGramsConfig = { + kernelName: tf.StringNGrams, + backendName: 'webgpu', + kernelFunc: stringNGrams, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var tanh = unaryKernelFunc({ opType: UnaryOpType.TANH }); + var tanhConfig = { + kernelName: tf.Tanh, + backendName: 'webgpu', + kernelFunc: tanh + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var TileProgram = /** @class */ (function () { + function TileProgram(aShape, reps) { + this.variableNames = ['A']; + this.workGroupSize = [64, 1, 1]; + this.size = true; + var outputShape = new Array(aShape.length); + for (var i = 0; i < outputShape.length; i++) { + outputShape[i] = aShape[i] * reps[i]; + } + this.outputShape = outputShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.rank = this.outputShape.length; + this.shaderKey = 'tile'; + } + TileProgram.prototype.getUserCode = function () { + var sourceCoords = getSourceCoords(this.rank, 'uniforms.'); + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let resRC = getCoordsFromIndex(index);\n setOutputAtIndex(index, getA(" + sourceCoords + "));\n }\n }\n "; + return userCode; + }; + return TileProgram; + }()); + function getSourceCoords(rank, uniformPrefix) { + if (uniformPrefix === void 0) { uniformPrefix = ''; } + if (rank >= 5) { + throw Error("Tile for rank " + rank + " is not yet supported"); + } + if (rank === 1) { + return "(resRC % " + uniformPrefix + "aShape)"; + } + var currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w']; + var sourceCoords = []; + for (var i = 0; i < rank; i++) { + sourceCoords.push("(" + currentCoords[i] + " % " + uniformPrefix + "aShape[" + i + "])"); + } + return sourceCoords.join(); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function tile(params) { + var inputs = params.inputs, backend = params.backend, attrs = params.attrs; + var x = inputs.x; + var reps = attrs.reps; + // tile gpu program cannot handle rank >= 5 case. + if (backend.shouldExecuteOnCPU([x]) || x.dtype === 'string' || + x.shape.length >= 5) { + // Even thought string tensor is always on CPU, just to be consistent on how + // to access tensor data. + var data = backend.readSync(x.dataId); + var value = x.dtype === 'string' ? + data.map(function (d) { return tf.util.decodeString(d); }) : + data; + var buf = tf.buffer(x.shape, x.dtype, value); + var outBuf = tileImplCPU(buf, reps); + return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values); + } + var program = new TileProgram(x.shape, reps); + var output = backend.runWebGPUProgram(program, [x], x.dtype); + return output; + } + var tileConfig = { + kernelName: tf.Tile, + backendName: 'webgpu', + kernelFunc: tile, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Based on Algorithm 2 of Bitonic Top K, ref: + // https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf + // The original algorithm is based on computing the top K only, however + // since for TFJS we require the indices of the top K values as well then the + // algorithm found here is a bit modified. Rather than producing the values + // at each step, the indices containing the top K are generated instead. + // The output values are not generated to reduce the number of outputs in the + // GPU, the values can easily be retrieved from the indices using a gather + // op. + var SwapProgram = /** @class */ (function () { + function SwapProgram(shape) { + this.variableNames = ['x', 'indices']; + this.workGroupSize = [256, 1, 1]; + this.size = true; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.uniforms = "inputSize : i32, firstPass : i32, negativeInf : f32,\n dir : i32, inc : i32,"; + this.shaderKey = 'swap'; + } + SwapProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let outC = getCoordsFromIndex(index);\n let batch = outC[0];\n let elemIdx = outC[1];\n // We compare elements pair-wise within a group of size 2 * inc.\n // The comparing rule for each group alternates between ascending\n // and descending. Within each group, we compare each pair at\n // positions i and i+inc. 
To decide whether an element at position i\n // is x0 or x1, we mod it by 2 * inc, if the result is smaller than\n // inc, it is in the first half of the group, we denote it as x0,\n // otherwise we denote it as x1.\n // For example, as shown in the Bitonic top K paper referenced\n // above, Figure5(a) shows that element[1] is in the second half of\n // the group when group size is 2, but it is in the first half of\n // the group when group size is 4.\n let isFirstInPair = elemIdx % (2 * uniforms.inc) < uniforms.inc;\n var i = 0;\n if (isFirstInPair) {\n i = elemIdx;\n } else {\n i = elemIdx - uniforms.inc;\n }\n\n var i0 = 0;\n if (uniforms.firstPass == 1) {\n i0 = i;\n } else {\n i0 = i32(getIndices(batch, i));\n }\n\n var i1 = 0;\n if (uniforms.firstPass == 1) {\n i1 = i + uniforms.inc;\n } else {\n i1 = i32(getIndices(batch, i + uniforms.inc));\n }\n\n var x0 = f32(0.0);\n var x1 = f32(0.0);\n if (i0 < uniforms.inputSize) {\n x0 = getX(batch, i0);\n } else {\n x0 = uniforms.negativeInf;\n }\n if (i1 < uniforms.inputSize) {\n x1 = getX(batch, i1);\n } else {\n x1 = uniforms.negativeInf;\n }\n\n let reverse = elemIdx % (2 * uniforms.dir) >= uniforms.dir;\n let isGreater = x0 > x1 || (x0 == x1 && i1 > i0);\n if (reverse == isGreater) {\n // Elements in opposite order of direction\n let iTemp = i0;\n i0 = i1;\n i1 = iTemp;\n }\n if (isFirstInPair) {\n setOutputAtIndex(index, f32(i0));\n } else {\n setOutputAtIndex(index, f32(i1));\n }\n }\n }\n "; + return userCode; + }; + return SwapProgram; + }()); + var MergeProgram = /** @class */ (function () { + function MergeProgram(shape) { + this.variableNames = ['x', 'indices']; + this.workGroupSize = [256, 1, 1]; + this.size = true; + this.outputShape = shape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + // |n| Size of the original input of TopK + // |firstPass| indicates if this is the first time swap is being used which + // means no indices input containing the top K is present yet. + // |k| Top k elements desired + this.uniforms = "inputSize : i32, firstPass : i32, k : i32,"; + this.shaderKey = 'merge'; + } + MergeProgram.prototype.getUserCode = function () { + var userCode = "\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let outC = getCoordsFromIndex(index);\n let batch = outC[0];\n let elemIdx = outC[1];\n // The output size is half of the previous size.\n // If the previous sequence is | | | | _ _ _ _ | | | | _ _ _ _\n // (k=4), we only need to output the indices at positions |, the\n // indices at positions _ can be thrown away, see Figure5(b) After\n // Phase 2 (Merge phase) in the Bitonic Top K paper referenced\n // above.\n // For example, the paper shows we only need to output the orange\n // bars. The output sequence should look like this | | | | | | | |.\n // Because the sequence is halved, to map the output index back to\n // the previous sequence to find the corresponding value, we need\n // to double the index. When we double the index, we basically\n // interpolate a position, so 2i looks like\n // | _ | _ | _ | _ | _ | _ | _. We move the | to the first k\n // position of each 2k positions by - elemIdx % k. E.g. 
for output\n // at index 4,5,6,7, we want to get the corresponding element at\n // original index 8,9,10,11, for output at index 8,9,10,11,\n // we want to get the corresponding element at original index\n // 16,17,18,19, so on and so forth.\n\n var i = 0;\n if (elemIdx < uniforms.k) {\n i = elemIdx;\n } else {\n i = elemIdx * 2 - elemIdx % uniforms.k;\n }\n var i0 = 0;\n if (uniforms.firstPass == 1) {\n i0 = i;\n } else {\n i0 = i32(getIndices(batch, i));\n }\n var i1 = 0;\n if (uniforms.firstPass == 1) {\n i1 = i + uniforms.k;\n } else {\n i1 = i32(getIndices(batch, i + uniforms.k));\n }\n\n let x0 = getX(batch, i0);\n var x1 = f32(0.0);\n if (i1 < uniforms.inputSize) {\n x1 = getX(batch, i1);\n } else {\n x1 = x0;\n }\n\n if (x0 >= x1) {\n setOutputAtIndex(index, f32(i0));\n } else {\n setOutputAtIndex(index, f32(i1));\n }\n }\n }\n "; + return userCode; + }; + return MergeProgram; + }()); + + function disposeIntermediateTensorInfoOrNull(backend, tensorInfo) { + if (tensorInfo !== null) { + backend.disposeData(tensorInfo.dataId); + } + } + function roundUpToPow2(num) { + var pow2 = 1; + while (pow2 < num) { + pow2 *= 2; + } + return pow2; + } + // Based on Algorithm 2 of Bitonic Top K, ref: + // https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf + function topK(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var x = inputs.x; + var k = attrs.k, sorted = attrs.sorted; + var xShape = x.shape; + var lastDim = xShape[xShape.length - 1]; + if (backend.shouldExecuteOnCPU([x])) { + var xVals = backend.readSync(x.dataId); + var _a = __read(topKImplCPU(xVals, xShape, x.dtype, k, sorted), 2), allTopKVals = _a[0], allTopKIndices = _a[1]; + return [ + backend.makeTensorInfo(allTopKVals.shape, allTopKVals.dtype, allTopKVals.values), + backend.makeTensorInfo(allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values) + ]; + } + if (k === 0) { + xShape[xShape.length - 1] = 0; + return [ + backend.makeTensorInfo(xShape, x.dtype, []), + backend.makeTensorInfo(xShape, 'int32', []) + ]; + } + if (lastDim === 1 /* firstPass */) { + return [ + x, fill({ attrs: { shape: xShape, dtype: 'int32', value: 0 }, backend: backend }) + ]; + } + // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim. + var xSize = tf.util.sizeFromShape(xShape); + var batch = xSize / lastDim; + var x2D = reshape({ inputs: { x: x }, attrs: { shape: [batch, lastDim] }, backend: backend }); + var kPow2 = roundUpToPow2(k); + var lastDimPow2 = roundUpToPow2(lastDim); + // Only the indices containing the top K are kept at every step to reduce + // number of outputs in the GPU algorithms, so once the final set of indices + // is computed then gather is used to grab the corresponding values + // from the original input. + var indices = null; + // GPU algorithm always takes in an indices input but this input is not used + // on the first run of a GPU algorithm, therefore if indices is null we simply + // pass in x2D instead of it but the value will not actually be used + var getInputs = function () { return indices === null ? [x2D, x2D] : [x2D, indices]; }; + var runSwap = function (dir, inc, shape) { + var inputs = getInputs(); + var program = new SwapProgram(shape); + var firstPass = indices === null ? 
1 : 0; + var uniformDataSwap = [ + { type: 'int32', data: [lastDim] }, + { type: 'int32', data: [firstPass] }, + { type: 'float32', data: [Number.NEGATIVE_INFINITY] }, + { type: 'int32', data: [dir] }, + { type: 'int32', data: [inc] } + ]; + var prevIndices = indices; + indices = backend.runWebGPUProgram(program, inputs, 'int32', uniformDataSwap); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + }; + // Step 1: local sort + for (var len = 1; len < kPow2; len *= 2) { + var dir = len * 2; + for (var inc = len; inc >= 1; inc /= 2) { + runSwap(dir, inc, [batch, lastDimPow2]); + } + } + // Step 2: merge + for (var indicesSize = lastDimPow2; indicesSize > kPow2; indicesSize /= 2) { + var inputs_1 = getInputs(); + var mergeProgram = new MergeProgram([batch, indicesSize / 2]); + var firstPass = indices === null ? 1 : 0; + var uniformDataMerge = [ + { type: 'int32', data: [lastDim] }, + { type: 'int32', data: [firstPass] }, + { type: 'int32', data: [kPow2] } + ]; + var prevIndices_1 = indices; + indices = backend.runWebGPUProgram(mergeProgram, inputs_1, 'int32', uniformDataMerge); + disposeIntermediateTensorInfoOrNull(backend, prevIndices_1); + // Step 3: rebuild + var len = kPow2 / 2; + var dir = len * 2; + for (var inc = len; inc >= 1; inc /= 2) { + runSwap(dir, inc, indices.shape); + } + } + // Keep only the requested top K results instead of kPow2 + var prevIndices = indices; + indices = slice({ inputs: { x: indices }, backend: backend, attrs: { begin: 0, size: [batch, k] } }); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + // Gather values on last dimension + var values = gatherV2({ inputs: { x: x2D, indices: indices }, backend: backend, attrs: { axis: 1, batchDims: 1 } }); + disposeIntermediateTensorInfoOrNull(backend, x2D); + // Reshape back to the original input shape, except that the last + // dimension is k. + var newShape = xShape.slice(0, -1); + newShape.push(k); + prevIndices = indices; + indices = reshape({ inputs: { x: indices }, attrs: { shape: newShape }, backend: backend }); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + var prevValues = values; + values = reshape({ inputs: { x: values }, attrs: { shape: newShape }, backend: backend }); + disposeIntermediateTensorInfoOrNull(backend, prevValues); + return [values, indices]; + } + var topKConfig = { + kernelName: tf.TopK, + backendName: 'webgpu', + kernelFunc: topK + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + var TransformProgram = /** @class */ (function () { + function TransformProgram(outShape) { + this.variableNames = ['Image', 'Transforms']; + this.uniforms = 'interpolationModeId : i32, fillModeId : i32, fillValue : f32,'; + this.workGroupSize = [64, 1, 1]; + this.size = true; + this.outputShape = outShape; + this.dispatchLayout = flatDispatchLayout(this.outputShape); + this.dispatch = computeDispatch(this.dispatchLayout, this.outputShape, this.workGroupSize); + this.shaderKey = 'transform'; + } + TransformProgram.prototype.getUserCode = function () { + var userCode = "\n fn mapCoord(outCoord : f32, len : f32) -> f32{\n var inCoord = outCoord;\n if(uniforms.fillModeId == 2) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz2 = 2.0 * len;\n if (inCoord < sz2) {\n inCoord = sz2 * f32(i32(f32(-inCoord / sz2))) +\n inCoord;\n }\n if (inCoord < -len) {\n inCoord = inCoord + sz2;\n } else {\n inCoord = -inCoord - 1.0;\n }\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz2 = 2.0 * len;\n inCoord = inCoord - sz2 * f32(i32(f32(inCoord / sz2)));\n if (inCoord >= len) {\n inCoord = sz2 - inCoord - 1.0;\n }\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (uniforms.fillModeId == 3) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz = len - 1.0;\n inCoord = inCoord + len * (f32(i32(f32(-inCoord / sz))) + 1.0);\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz = len - 1.0;\n inCoord = inCoord - len * f32(i32(f32(inCoord / sz)));\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (uniforms.fillModeId == 4) {\n return clamp(outCoord, 0.0, len - 1.0);\n }\n return outCoord;\n }\n fn readWithFillValue(batch : i32, coordY : i32, coordX : i32,\n channel : i32) -> f32 {\n var outputValue : f32;\n if (0 <= coordY && coordY < uniforms.imageShape[1] && 0 <= coordX && coordX < uniforms.imageShape[2]) {\n outputValue = getImage(batch, coordY, coordX, channel);\n } else {\n outputValue = uniforms.fillValue;\n }\n return outputValue;\n }\n\n " + getMainHeaderAndGlobalIndexString() + "\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n var outputValue : f32;\n let batch = coords[0];\n let x = coords[2];\n let y = coords[1];\n let channel = coords[3];\n let xf = f32(x);\n let yf = f32(y);\n let a1 = getTransforms(batch, 0);\n let a2 = getTransforms(batch, 1);\n let a3 = getTransforms(batch, 2);\n let b1 = getTransforms(batch, 3);\n let b2 = getTransforms(batch, 4);\n let b3 = getTransforms(batch, 5);\n let c1 = getTransforms(batch, 6);\n let c2 = getTransforms(batch, 7);\n let projection = c1 * xf + c2 * yf + 1.0;\n if (projection == 0.0) {\n outputValue = uniforms.fillValue;\n } else {\n let inX = (a1 * xf + a2 * yf + a3) / projection;\n let inY = (b1 * xf + b2 * yf + b3) / projection;\n let mapX = mapCoord(inX, f32(uniforms.imageShape[2]));\n let mapY = mapCoord(inY, f32(uniforms.imageShape[1]));\n\n if (uniforms.interpolationModeId == 1) {\n let coordY = i32(round(mapY));\n let coordX = i32(round(mapX));\n outputValue = readWithFillValue(batch, coordY, coordX,\n channel);\n } else {\n let yFloor = floor(mapY);\n let xFloor = floor(mapX);\n let yCeil = yFloor + 1.0;\n let xCeil = xFloor + 1.0;\n let valueYFloor = (xCeil - mapX) *\n readWithFillValue(batch, i32(yFloor), i32(xFloor), channel) +\n (mapX - xFloor) *\n 
readWithFillValue(batch, i32(yFloor), i32(xCeil), channel);\n let valueYCeil = (xCeil - mapX) *\n readWithFillValue(batch, i32(yCeil), i32(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, i32(yCeil), i32(xCeil), channel);\n outputValue = (yCeil - mapY) * valueYFloor +\n (mapY - yFloor) * valueYCeil;\n }\n }\n setOutputAtIndex(index, outputValue);\n }\n }\n "; + return userCode; + }; + return TransformProgram; + }()); + + function transform(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var image = inputs.image, transforms = inputs.transforms; + var interpolation = attrs.interpolation, fillMode = attrs.fillMode, fillValue = attrs.fillValue, outputShape = attrs.outputShape; + var _a = __read(image.shape, 4), batch = _a[0], imageHeight = _a[1], imageWidth = _a[2], numChannels = _a[3]; + var _b = __read(outputShape != null ? outputShape : [imageHeight, imageWidth], 2), outHeight = _b[0], outWidth = _b[1]; + var outShape = [batch, outHeight, outWidth, + numChannels]; + var program = new TransformProgram(outShape); + var interpolationModeId = interpolation === 'nearest' ? 1 : 2; + var fillModeId; + switch (fillMode) { + case 'constant': + fillModeId = 1; + break; + case 'reflect': + fillModeId = 2; + break; + case 'wrap': + fillModeId = 3; + break; + case 'nearest': + fillModeId = 4; + break; + default: + fillModeId = 1; + break; + } + var uniformData = [ + { type: 'int32', data: [interpolationModeId] }, + { type: 'int32', data: [fillModeId] }, { type: 'float32', data: [fillValue] } + ]; + return backend.runWebGPUProgram(program, [image, transforms], 'float32', uniformData); + } + var transformConfig = { + kernelName: tf.Transform, + backendName: 'webgpu', + kernelFunc: transform + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function unpack(args) { + var inputs = args.inputs, backend = args.backend, attrs = args.attrs; + var value = inputs.value; + var axis = attrs.axis; + if (axis < 0) { + axis += value.shape.length; + } + var x = value; + var xRank = x.shape.length; + var num = value.shape[axis]; + var outShape = new Array(xRank - 1); + var outIndex = 0; + for (var i = 0; i < xRank; i++) { + if (i !== axis) { + outShape[outIndex++] = x.shape[i]; + } + } + var toDispose = []; + var begin = new Array(xRank).fill(0); + var size = x.shape.slice(); + size[axis] = 1; + var res = new Array(num); + for (var i = 0; i < res.length; i++) { + begin[axis] = i; + var sliced = slice({ inputs: { x: x }, backend: backend, attrs: { begin: begin, size: size } }); + var reshaped = reshape({ inputs: { x: sliced }, backend: backend, attrs: { shape: outShape } }); + res[i] = reshaped; + toDispose.push(sliced); + } + toDispose.forEach(function (t) { return backend.disposeData(t.dataId); }); + return res; + } + var unpackConfig = { + kernelName: tf.Unpack, + backendName: 'webgpu', + kernelFunc: unpack + }; + + var e_1, _a; + // List all kernel configs here + var kernelConfigs = [ + _fusedMatMulConfig, + absConfig, + addConfig, + addNConfig, + argMaxConfig, + argMinConfig, + avgPoolConfig, + batchMatMulConfig, + batchToSpaceNDConfig, + castConfig, + ceilConfig, + clipByValueConfig, + complexConfig, + concatConfig, + conv2DConfig, + conv2DBackpropInputConfig, + cosConfig, + coshConfig, + cropAndResizeConfig, + cumprodConfig, + cumsumConfig, + depthToSpaceConfig, + depthwiseConv2dNativeConfig, + einsumConfig, + eluConfig, + equalConfig, + expConfig, + expandDimsConfig, + expm1Config, + fillConfig, + flipLeftRightConfig, + fromPixelsConfig, + floorConfig, + floorDivConfig, + fusedBatchNormConfig, + fusedConv2DConfig, + fusedDepthwiseConv2DConfig, + gatherNdConfig, + gatherV2Config, + greaterConfig, + greaterEqualConfig, + identityConfig, + imagConfig, + leakyReluConfig, + lessConfig, + lessEqualConfig, + logConfig, + logicalAndConfig, + logicalNotConfig, + maxConfig, + maximumConfig, + maxPoolConfig, + meanConfig, + minConfig, + minimumConfig, + mirrorPadConfig, + multiplyConfig, + negConfig, + nonMaxSuppressionV3Config, + nonMaxSuppressionV5Config, + notEqualConfig, + onesLikeConfig, + packConfig, + padV2Config, + powConfig, + preluConfig, + prodConfig, + rangeConfig, + realConfig, + realDivConfig, + reluConfig, + relu6Config, + reshapeConfig, + resizeBilinearConfig, + resizeNearestNeighborConfig, + rotateWithOffsetConfig, + rsqrtConfig, + scatterNdConfig, + selectConfig, + sigmoidConfig, + sinConfig, + sinhConfig, + sliceConfig, + stridedSliceConfig, + stringNGramsConfig, + softmaxConfig, + spaceToBatchNDConfig, + sparseToDenseConfig, + splitVConfig, + sqrtConfig, + squareConfig, + squaredDifferenceConfig, + subConfig, + sumConfig, + tanhConfig, + tileConfig, + topKConfig, + transformConfig, + transposeConfig, + unpackConfig, + zerosLikeConfig + ]; + try { + for (var kernelConfigs_1 = __values(kernelConfigs), kernelConfigs_1_1 = kernelConfigs_1.next(); !kernelConfigs_1_1.done; kernelConfigs_1_1 = kernelConfigs_1.next()) { + var kernelConfig = kernelConfigs_1_1.value; + tf.registerKernel(kernelConfig); + } + } + catch (e_1_1) { e_1 = { error: e_1_1 }; } + finally { + try { + if (kernelConfigs_1_1 && !kernelConfigs_1_1.done && (_a = kernelConfigs_1.return)) _a.call(kernelConfigs_1); + } + finally { if (e_1) throw e_1.error; } + } + + /** + * 
@license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var BufferManager = /** @class */ (function () { + function BufferManager(device) { + this.device = device; + this.numUsedBuffers = 0; + this.numFreeBuffers = 0; + this.freeBuffers = new Map(); + this.usedBuffers = new Map(); + this.numBytesUsed = 0; + this.numBytesAllocated = 0; + } + BufferManager.prototype.acquireUploadBuffer = function (byteSize, usage) { + return this.acquireBuffer(byteSize, usage, true); + }; + BufferManager.prototype.acquireBuffer = function (byteSize, usage, mappedAtCreation) { + if (mappedAtCreation === void 0) { mappedAtCreation = false; } + var key = getBufferKey(byteSize, usage); + if (!this.freeBuffers.has(key)) { + this.freeBuffers.set(key, []); + } + if (!this.usedBuffers.has(key)) { + this.usedBuffers.set(key, []); + } + this.numBytesUsed += byteSize; + this.numUsedBuffers++; + if (this.freeBuffers.get(key).length > 0) { + this.numFreeBuffers--; + var newBuffer_1 = this.freeBuffers.get(key).shift(); + this.usedBuffers.get(key).push(newBuffer_1); + return newBuffer_1; + } + this.numBytesAllocated += byteSize; + var newBuffer = this.device.createBuffer({ mappedAtCreation: mappedAtCreation, size: byteSize, usage: usage }); + this.usedBuffers.get(key).push(newBuffer); + return newBuffer; + }; + BufferManager.prototype.releaseBuffer = function (buffer, byteSize, usage) { + if (this.freeBuffers.size === 0) { + return; + } + var key = getBufferKey(byteSize, usage); + if (!this.freeBuffers.has(key)) { + this.freeBuffers.set(key, []); + } + this.freeBuffers.get(key).push(buffer); + this.numFreeBuffers++; + this.numUsedBuffers--; + var bufferList = this.usedBuffers.get(key); + var bufferIndex = bufferList.indexOf(buffer); + if (bufferIndex < 0) { + throw new Error('Cannot release a buffer that was never provided by this ' + + 'buffer manager'); + } + bufferList.splice(bufferIndex, 1); + this.numBytesUsed -= byteSize; + }; + BufferManager.prototype.releaseUploadBuffer = function (buffer, byteSize, usage) { + var _this = this; + buffer.mapAsync(GPUMapMode.WRITE) + .then(function () { + _this.releaseBuffer(buffer, byteSize, usage); + }, function (err) { + // Do nothing; + }); + }; + BufferManager.prototype.getNumUsedBuffers = function () { + return this.numUsedBuffers; + }; + BufferManager.prototype.getNumFreeBuffers = function () { + return this.numFreeBuffers; + }; + BufferManager.prototype.dispose = function () { + this.freeBuffers.forEach(function (buffers, key) { + buffers.forEach(function (buff) { + buff.destroy(); + }); + }); + this.usedBuffers.forEach(function (buffers, key) { + buffers.forEach(function (buff) { + buff.destroy(); + }); + }); + this.freeBuffers = new Map(); + this.usedBuffers = new Map(); + this.numUsedBuffers = 0; + this.numFreeBuffers = 0; + this.numBytesUsed = 0; + this.numBytesAllocated = 0; + }; + return BufferManager; + }()); + function 
getBufferKey(byteSize, usage) { + return byteSize + "_" + usage; + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var TextureManager = /** @class */ (function () { + function TextureManager(device) { + this.device = device; + this.numUsedTextures = 0; + this.numFreeTextures = 0; + this.freeTextures = new Map(); + this.usedTextures = new Map(); + this.numBytesUsed = 0; + this.numBytesAllocated = 0; + } + TextureManager.prototype.acquireTexture = function (width, height, format, usage) { + var bytesPerElement = getBytesPerElement(format); + var byteSize = width * height * bytesPerElement; + var key = getTextureKey(width, height, format, usage); + if (!this.freeTextures.has(key)) { + this.freeTextures.set(key, []); + } + if (!this.usedTextures.has(key)) { + this.usedTextures.set(key, []); + } + this.numBytesUsed += byteSize; + this.numUsedTextures++; + if (this.freeTextures.get(key).length > 0) { + this.numFreeTextures--; + var newTexture_1 = this.freeTextures.get(key).shift(); + this.usedTextures.get(key).push(newTexture_1); + return newTexture_1; + } + this.numBytesAllocated += byteSize; + var newTexture = this.device.createTexture({ + size: [width, height], + format: format, + usage: usage, + }); + this.usedTextures.get(key).push(newTexture); + return newTexture; + }; + TextureManager.prototype.releaseTexture = function (texture, width, height, format, usage) { + if (this.freeTextures.size === 0) { + return; + } + var key = getTextureKey(width, height, format, usage); + if (!this.freeTextures.has(key)) { + this.freeTextures.set(key, []); + } + this.freeTextures.get(key).push(texture); + this.numFreeTextures++; + this.numUsedTextures--; + var textureList = this.usedTextures.get(key); + var textureIndex = textureList.indexOf(texture); + if (textureIndex < 0) { + throw new Error('Cannot release a texture that was never provided by this ' + + 'texture manager'); + } + textureList.splice(textureIndex, 1); + var bytesPerElement = getBytesPerElement(format); + var byteSize = width * height * bytesPerElement; + this.numBytesUsed -= byteSize; + }; + TextureManager.prototype.getNumUsedTextures = function () { + return this.numUsedTextures; + }; + TextureManager.prototype.getNumFreeTextures = function () { + return this.numFreeTextures; + }; + TextureManager.prototype.dispose = function () { + this.freeTextures.forEach(function (textures, key) { + textures.forEach(function (texture) { + texture.destroy(); + }); + }); + this.usedTextures.forEach(function (textures, key) { + textures.forEach(function (texture) { + texture.destroy(); + }); + }); + this.freeTextures = new Map(); + this.usedTextures = new Map(); + this.numUsedTextures = 0; + this.numFreeTextures = 0; + this.numBytesUsed = 0; + this.numBytesAllocated = 0; + }; + return TextureManager; + }()); + function getTextureKey(width, height, format, usage) { + return width + "_" + 
height + "_" + format + "_" + usage; + } + function getBytesPerElement(format) { + if (format === 'rgba8unorm') { + return 16; + } + else { + throw new Error(format + " is not supported!"); + } + } + + var makeBindGroup = function (device, bindGroupLayout, inputs, output, uniforms) { + var bindings = __spread([output], inputs); + if (uniforms) { + bindings.push(uniforms); + } + return device.createBindGroup({ + layout: bindGroupLayout, + entries: bindings.map(function (b, i) { return ({ binding: i, resource: b }); }), + }); + }; + var compileProgram = function (device, program, pipelineLayout, inputsData, output, isFromPixel) { + if (isFromPixel === void 0) { isFromPixel = false; } + var outputData = { dtype: output.dtype, shape: output.shape }; + var source = makeShader(inputsData, outputData, program, isFromPixel); + var module = device.createShaderModule({ code: source, label: program.constructor.name }); + var pipeline = device.createComputePipeline({ + layout: pipelineLayout, + compute: { module: module, entryPoint: 'main' }, + label: program.constructor.name + }); + return pipeline; + }; + function makeShaderKey(program, shapes, types, broadcastDimsKey, inputShapesEqualsOutShape) { + if (types === void 0) { types = []; } + if (broadcastDimsKey === void 0) { broadcastDimsKey = ''; } + if (inputShapesEqualsOutShape === void 0) { inputShapesEqualsOutShape = ''; } + var key = program.shaderKey + '_' + + (program.workGroupSize ? program.workGroupSize.join(',') : '') + + shapes.map(function (shape) { return shape.length; }).join(',') + types.join(',') + + program.variableNames.join(',') + broadcastDimsKey + + inputShapesEqualsOutShape; + return key; + } + + // Empirically determined constant used to determine size threshold for handing + // off execution to the CPU. + var CPU_HANDOFF_SIZE_THRESHOLD = tf.env().getNumber('WEBGPU_CPU_HANDOFF_SIZE_THRESHOLD'); + // Reshape dispatch, not to exceed device limits. 
+ var reshapeDispatch = function (device, program) { + var MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE = device.limits.maxComputeWorkgroupsPerDimension; + var layout = program['dispatchLayout']; + var dispatch = program['dispatch']; + if (dispatch.every(function (d) { return d <= MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE; })) { + return dispatch; + } + tf.util.assert(dispatch[0] > MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE && + layout.y === undefined && layout.z === undefined, function () { return 'Dispatch size exceeds WebGPU limits in Y or Z dimension.'; }); + var dispatchAverage = Math.ceil(Math.sqrt(dispatch[0])); + if (dispatchAverage > MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE) { + dispatchAverage = Math.ceil(Math.cbrt(dispatch[0])); + tf.util.assert(dispatchAverage <= MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE, function () { return 'Total dispatch size exceeds WebGPU maximum.'; }); + return [dispatchAverage, dispatchAverage, dispatchAverage]; + } + else { + return [dispatchAverage, dispatchAverage, 1]; + } + }; + var WebGPUBackend = /** @class */ (function (_super) { + __extends(WebGPUBackend, _super); + function WebGPUBackend(device, supportTimeQuery) { + if (supportTimeQuery === void 0) { supportTimeQuery = false; } + var _this = _super.call(this) || this; + _this.commandQueueOwnedIds = new WeakSet(); + _this.tensorDisposalQueue = []; + _this.uniformDisposalQueue = []; + _this.stagingDisposalQueue = []; + _this.textureDisposalQueue = []; + _this.disposed = false; + _this.uploadWaitMs = 0; + _this.downloadWaitMs = 0; + _this.dispatchNumberInEncoder = 0; + _this.fromPixelTextureLayout = null; + _this.fromPixelImportTextureLayout = null; + if (!isWebGPUSupported()) { + throw new Error('WebGPU is not supported on this device'); + } + _this.layoutCache = {}; + _this.pipelineCache = {}; + _this.device = device; + _this.queue = device.queue; + _this.currentCommandEncoder = null; + _this.currentComputePass = null; + _this.supportTimeQuery = supportTimeQuery; + _this.bufferManager = new BufferManager(_this.device); + _this.textureManager = new TextureManager(_this.device); + _this.tensorMap = new tf.DataStorage(_this, tf.engine()); + if (_this.supportTimeQuery) { + _this.querySet = _this.device.createQuerySet({ + type: 'timestamp', + count: 2, + }); + } + // Profiling tools like PIX needs this dummy canvas to + // trigger capturing a frame. 
+ if (tf.env().getBool('WEBGPU_USE_PROFILE_TOOL')) { + _this.dummyCanvas = document.createElement('canvas'); + _this.dummyCanvas.width = 1; + _this.dummyCanvas.height = 1; + _this.dummyContext = _this.dummyCanvas.getContext('webgpu'); + _this.dummyContext.configure({ + device: device, + format: 'bgra8unorm', + }); + document.body.appendChild(_this.dummyCanvas); + } + return _this; + } + WebGPUBackend.prototype.nextDataId = function () { + return WebGPUBackend.nextDataId++; + }; + WebGPUBackend.prototype.floatPrecision = function () { + return 32; + }; + WebGPUBackend.prototype.defaultGpuBufferUsage = function () { + return GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC | + GPUBufferUsage.COPY_DST; + }; + WebGPUBackend.prototype.flushDisposalQueue = function () { + var _this = this; + this.tensorDisposalQueue.forEach(function (d) { + _this.maybeReleaseBuffer(d); + _this.tensorMap.delete(d); + }); + this.uniformDisposalQueue.forEach(function (d) { return _this.bufferManager.releaseBuffer(d.buffer, d.byteSize, d.usage); }); + this.stagingDisposalQueue.forEach(function (d) { return _this.bufferManager.releaseUploadBuffer(d.buffer, d.byteSize, d.usage); }); + this.textureDisposalQueue.forEach(function (d) { return _this.textureManager.releaseTexture(d.texture, d.width, d.height, d.format, d.usage); }); + this.tensorDisposalQueue = []; + this.uniformDisposalQueue = []; + this.stagingDisposalQueue = []; + this.textureDisposalQueue = []; + }; + /** + * Dispose the memory if the dataId has 0 refCount. Return true if the memory + * is released or memory is not managed in this backend, false if memory is + * not cleared. + * @param dataId + * @oaram force Optional, remove the data regardless of refCount + */ + WebGPUBackend.prototype.disposeData = function (dataId, force) { + if (force === void 0) { force = false; } + if (this.tensorMap.has(dataId)) { + var data = this.tensorMap.get(dataId); + data.refCount--; + if (!force && data.refCount > 0) { + return false; + } + if (this.commandQueueOwnedIds.has(dataId)) { + this.tensorDisposalQueue.push(dataId); + return false; + } + else { + this.maybeReleaseBuffer(dataId); + } + var complexTensorInfos = this.tensorMap.get(dataId).complexTensorInfos; + if (complexTensorInfos != null) { + this.disposeData(complexTensorInfos.real.dataId, true); + this.disposeData(complexTensorInfos.imag.dataId, true); + } + this.tensorMap.delete(dataId); + } + return true; + }; + WebGPUBackend.prototype.memory = function () { + return { + numBytesInGPU: this.bufferManager.numBytesUsed, + numBytesAllocatedInGPU: this.bufferManager.numBytesAllocated, + unreliable: false + }; + }; + WebGPUBackend.prototype.getBufferManager = function () { + return this.bufferManager; + }; + WebGPUBackend.prototype.getTextureManager = function () { + return this.textureManager; + }; + WebGPUBackend.prototype.acquireBuffer = function (byteSize, usage) { + if (usage === void 0) { usage = this.defaultGpuBufferUsage(); } + return this.bufferManager.acquireBuffer(byteSize, usage); + }; + WebGPUBackend.prototype.maybeReleaseBuffer = function (dataId) { + var info = this.tensorMap.get(dataId); + if (info != null && info.bufferInfo.buffer != null) { + this.bufferManager.releaseBuffer(info.bufferInfo.buffer, info.bufferInfo.byteSize, info.bufferInfo.usage); + info.bufferInfo.buffer = null; + } + }; + /** Return refCount of a `TensorData`. 
*/ + WebGPUBackend.prototype.refCount = function (dataId) { + if (this.tensorMap.has(dataId)) { + var tensorData = this.tensorMap.get(dataId); + return tensorData.refCount; + } + return 0; + }; + /** Increase refCount of a `TensorData`. */ + WebGPUBackend.prototype.incRef = function (dataId) { + var tensorData = this.tensorMap.get(dataId); + tensorData.refCount++; + }; + /** Decrease refCount of a `TensorData`. */ + WebGPUBackend.prototype.decRef = function (dataId) { + if (this.tensorMap.has(dataId)) { + var tensorData = this.tensorMap.get(dataId); + tensorData.refCount--; + } + }; + WebGPUBackend.prototype.write = function (values, shape, dtype) { + if (dtype === 'complex64' && values != null) { + throw new Error("Cannot write to a complex64 dtype. " + + "Please use tf.complex(real, imag)."); + } + var dataId = { id: this.nextDataId() }; + var byteSize = tf.util.sizeFromShape(shape) * GPUBytesPerElement(dtype); + this.tensorMap.set(dataId, { + dtype: dtype, + values: values, + bufferInfo: { byteSize: byteSize, usage: this.defaultGpuBufferUsage() }, + refCount: 1 + }); + return dataId; + }; + WebGPUBackend.prototype.move = function (dataId, values, shape, dtype, refCount) { + if (dtype === 'complex64') { + throw new Error("Cannot write to a complex64 dtype. " + + "Please use tf.complex(real, imag)."); + } + var byteSize = tf.util.sizeFromShape(shape) * GPUBytesPerElement(dtype); + this.tensorMap.set(dataId, { + dtype: dtype, + values: values, + bufferInfo: { byteSize: byteSize, usage: this.defaultGpuBufferUsage() }, + refCount: refCount + }); + }; + WebGPUBackend.prototype.submitQueue = function () { + this.ensureComputePassEnded(); + this.queue.submit([this.currentCommandEncoder.finish()]); + this.currentCommandEncoder = null; + this.dispatchNumberInEncoder = 0; + this.commandQueueOwnedIds = new WeakSet(); + this.flushDisposalQueue(); + }; + WebGPUBackend.prototype.getBuffer = function (dataId) { + this.uploadToGPU(dataId); + return this.tensorMap.get(dataId).bufferInfo.buffer; + }; + WebGPUBackend.prototype.ensureCommandEncoderReady = function () { + if (!this.currentCommandEncoder) { + this.currentCommandEncoder = this.device.createCommandEncoder(); + } + }; + WebGPUBackend.prototype.ensureComputePassEnded = function () { + if (this.currentComputePass) { + this.currentComputePass.end(); + this.currentComputePass = null; + } + }; + WebGPUBackend.prototype.getComputePass = function () { + if (!this.currentComputePass) { + this.currentComputePass = this.currentCommandEncoder.beginComputePass(); + } + return this.currentComputePass; + }; + WebGPUBackend.prototype.getBufferData = function (info) { + return __awaiter(this, void 0, void 0, function () { + var staging, values; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + if (info.values != null) { + // Data is on the CPU. 
+ return [2 /*return*/, info.values]; + } + staging = this.acquireBuffer(info.bufferInfo.byteSize, GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ); + this.ensureCommandEncoderReady(); + this.ensureComputePassEnded(); + this.currentCommandEncoder.copyBufferToBuffer(info.bufferInfo.buffer, 0, staging, 0, info.bufferInfo.byteSize); + this.submitQueue(); + return [4 /*yield*/, staging.mapAsync(GPUMapMode.READ)]; + case 1: + _b.sent(); + values = staging.getMappedRange().slice(0); + staging.unmap(); + if (staging != null) { + this.bufferManager.releaseBuffer(staging, info.bufferInfo.byteSize, GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ); + } + // Need to get texture from swapChain to enable profiling tool + // to capture a frame + if (tf.env().getBool('WEBGPU_USE_PROFILE_TOOL')) { + tf.util.assert(this.dummyContext !== undefined, function () { return "Fail to get context for profiling tool"; }); + this.dummyContext.getCurrentTexture(); + } + return [2 /*return*/, values]; + } + }); + }); + }; + WebGPUBackend.prototype.convertAndCacheOnCPU = function (dataId, data) { + var info = this.tensorMap.get(dataId); + this.maybeReleaseBuffer(dataId); + info.values = data; + return info.values; + }; + // TODO: Remove once this is fixed: + // https://github.com/tensorflow/tfjs/issues/1595 + WebGPUBackend.prototype.readSync = function (dataId) { + var texData = this.tensorMap.get(dataId); + var values = texData.values; + if (values == null) { + throw new Error('WebGPU readSync is only available for CPU-resident tensors.'); + } + return values; + }; + WebGPUBackend.prototype.read = function (dataId) { + return __awaiter(this, void 0, void 0, function () { + var info, values, vals, ps, realValues, imagValues, data; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + if (!this.tensorMap.has(dataId)) { + throw new Error("Tensor " + dataId + " was not registered!"); + } + info = this.tensorMap.get(dataId); + values = info.values; + if (values != null) { + // TODO(xing.xu@intel.com): Merge backend_util.BackendValues and + // backend_util.TypedArray. + return [2 /*return*/, this.convertAndCacheOnCPU(dataId, values)]; + } + if (!(info.dtype === 'complex64')) return [3 /*break*/, 2]; + return [4 /*yield*/, Promise.all([ + this.read(info.complexTensorInfos.real.dataId), + this.read(info.complexTensorInfos.imag.dataId) + ])]; + case 1: + ps = _b.sent(); + realValues = ps[0]; + imagValues = ps[1]; + vals = tf.backend_util.mergeRealAndImagArrays(realValues, imagValues); + return [3 /*break*/, 4]; + case 2: return [4 /*yield*/, this.getBufferData(info)]; + case 3: + data = _b.sent(); + vals = + ArrayBufferToTypedArray(data, info.dtype); + _b.label = 4; + case 4: + this.convertAndCacheOnCPU(dataId, vals); + return [2 /*return*/, vals]; + } + }); + }); + }; + WebGPUBackend.prototype.bufferSync = function (t) { + var data = this.readSync(t.dataId); + var decodedData = data; + if (t.dtype === 'string') { + try { + // Decode the bytes into string. 
+ decodedData = data.map(function (d) { return tf.util.decodeString(d); }); + } + catch (_a) { + throw new Error('Failed to decode encoded string bytes into utf-8'); + } + } + return tf.buffer(t.shape, t.dtype, decodedData); + }; + WebGPUBackend.prototype.time = function (f) { + return __awaiter(this, void 0, void 0, function () { + var oldActiveTimers, newActiveTimers, outerMostTime, flattenedActiveTimerQueries, flattenedActiveTimerNames, res, kernelMs; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + oldActiveTimers = this.activeTimers; + newActiveTimers = []; + outerMostTime = false; + if (this.programTimersStack == null) { + this.programTimersStack = newActiveTimers; + outerMostTime = true; + } + else { + this.activeTimers.push(newActiveTimers); + } + this.activeTimers = newActiveTimers; + f(); + flattenedActiveTimerQueries = tf.util.flatten(this.activeTimers.map(function (d) { return d.query; })) + .filter(function (d) { return d != null; }); + flattenedActiveTimerNames = tf.util.flatten(this.activeTimers.map(function (d) { return d.name; })) + .filter(function (d) { return d != null; }); + this.activeTimers = oldActiveTimers; + if (outerMostTime) { + this.programTimersStack = null; + } + res = { + uploadWaitMs: this.uploadWaitMs, + downloadWaitMs: this.downloadWaitMs, + kernelMs: null, + wallMs: null + }; + return [4 /*yield*/, Promise.all(flattenedActiveTimerQueries)]; + case 1: + kernelMs = _b.sent(); + res['kernelMs'] = tf.util.sum(kernelMs); + res['getExtraProfileInfo'] = function () { return kernelMs.map(function (d, i) { return ({ name: flattenedActiveTimerNames[i], ms: d }); }) + .map(function (d) { return d.name + ": " + d.ms; }) + .join(', '); }; + this.uploadWaitMs = 0; + this.downloadWaitMs = 0; + return [2 /*return*/, res]; + } + }); + }); + }; + WebGPUBackend.prototype.getAndSavePipeline = function (key, getPipeline) { + if (!(key in this.pipelineCache)) { + this.pipelineCache[key] = getPipeline(); + } + return this.pipelineCache[key]; + }; + WebGPUBackend.prototype.makeTensorInfo = function (shape, dtype, values) { + var dataId; + if (dtype === 'string' && values != null && values.length > 0 && + tf.util.isString(values[0])) { + var encodedValues = values.map(function (d) { return tf.util.encodeString(d); }); + dataId = this.write(encodedValues, shape, dtype); + } + else { + dataId = this.write(values, shape, dtype); + } + return { dataId: dataId, shape: shape, dtype: dtype }; + }; + WebGPUBackend.prototype.tensorToBinding = function (tensor) { + if (!tensor) { + return null; + } + var tensorData = this.tensorMap.get(tensor.dataId); + return { + offset: 0, + size: tensorData.bufferInfo.byteSize, + buffer: tensorData.bufferInfo.buffer + }; + }; + WebGPUBackend.prototype.getQueryTime = function (query) { + return __awaiter(this, void 0, void 0, function () { + return __generator(this, function (_b) { + if (this.supportTimeQuery) { + return [2 /*return*/, this.getTimeFromQuerySet(query)]; + } + else { + return [2 /*return*/, 0]; + } + }); + }); + }; + WebGPUBackend.prototype.uploadToGPU = function (dataId) { + var info = this.tensorMap.get(dataId); + if (info.bufferInfo.buffer != null) { + // Already on the GPU. 
+ return; + } + info.bufferInfo.buffer = this.acquireBuffer(info.bufferInfo.byteSize); + if (info.values) { + var stagingBuffer = this.bufferManager.acquireUploadBuffer(info.bufferInfo.byteSize, GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC); + var arrayBuffer = stagingBuffer.getMappedRange(); + if (info.dtype === 'int32' || info.dtype === 'bool') { + new Int32Array(arrayBuffer).set(info.values); + } + else { + new Float32Array(arrayBuffer).set(info.values); + } + stagingBuffer.unmap(); + this.ensureCommandEncoderReady(); + this.ensureComputePassEnded(); + this.currentCommandEncoder.copyBufferToBuffer(stagingBuffer, 0, info.bufferInfo.buffer, 0, info.bufferInfo.byteSize); + var stagingInfo = { + byteSize: info.bufferInfo.byteSize, + usage: GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC, + buffer: stagingBuffer + }; + this.stagingDisposalQueue.push(stagingInfo); + // TODO: WebGPU doesn't support read data synchronously from GPU to CPU. + // So it will report error when switching backend from WebGPU to others. + // There are two situations: 1) swithcing the backend after running a + // model; 2) swithcing the backend within the model. Temporarilly keep the + // values on CPU to solve the first issue. + // info.values = null; + } + }; + WebGPUBackend.prototype.makeUniforms = function (uniformsWithType) { + var currentOffset = 0; + var preLength = 0; + var offsets = []; + uniformsWithType.forEach(function (d) { + if (d.data.length === 0) { + d.data = [1]; + } + // https://www.w3.org/TR/WGSL/#alignof + var baseAlignment; + switch (d.data.length) { + case 1: + baseAlignment = 4; + break; + case 2: + baseAlignment = 8; + break; + case 3: + baseAlignment = 16; + break; + case 4: + baseAlignment = 16; + break; + case 5: + baseAlignment = 16; + break; + case 6: + baseAlignment = 16; + break; + default: + tf.util.assert(false, function () { return "Unsupported " + d.data.length + "D shape"; }); + } + if (preLength === 5 || preLength === 6) { + baseAlignment = 16; + } + currentOffset = Math.ceil(currentOffset / baseAlignment) * baseAlignment; + preLength = d.data.length; + offsets.push(currentOffset); + currentOffset += d.data.length * 4; + }); + var arrayBuffer = new ArrayBuffer(currentOffset); + uniformsWithType.forEach(function (d, i) { + var offset = offsets[i]; + if (d.type === 'int32') { + new Int32Array(arrayBuffer, offset, d.data.length).set(d.data); + } + else if (d.type === 'uint32') { + new Uint32Array(arrayBuffer, offset, d.data.length).set(d.data); + } + else { + new Float32Array(arrayBuffer, offset, d.data.length).set(d.data); + } + }); + var uniformBuffer = this.acquireBuffer(currentOffset, GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM); + this.queue.writeBuffer(uniformBuffer, 0, arrayBuffer, 0, currentOffset); + var uniformInfo = { + byteSize: currentOffset, + usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM, + buffer: uniformBuffer + }; + this.uniformDisposalQueue.push(uniformInfo); + return { offset: 0, size: currentOffset, buffer: uniformBuffer }; + }; + // This layout is used by all programs except fromPixel. + WebGPUBackend.prototype.createLayout = function (inputEntrySize) { + var bindGroupLayoutEntries = []; + // Output buffer binding layout. + bindGroupLayoutEntries.push({ + binding: 0, + visibility: GPUShaderStage.COMPUTE, + buffer: { type: 'storage' } + }); + // Input buffer binding layout. Depends on variableNames length. 
+ for (var i = 0; i < inputEntrySize; i++) { + bindGroupLayoutEntries.push({ + binding: i + 1, + visibility: GPUShaderStage.COMPUTE, + buffer: { type: 'read-only-storage' } + }); + } + bindGroupLayoutEntries.push({ + binding: inputEntrySize + 1, + visibility: GPUShaderStage.COMPUTE, + buffer: { type: 'uniform' } + }); + var bindGroupLayout = this.device.createBindGroupLayout({ entries: bindGroupLayoutEntries }); + var pipelineLayout = this.device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }); + return { bindGroupLayout: bindGroupLayout, pipelineLayout: pipelineLayout }; + }; + WebGPUBackend.prototype.getCachedOrCreateLayout = function (inputEntrySize) { + if (!(inputEntrySize in this.layoutCache)) { + this.layoutCache[inputEntrySize] = this.createLayout(inputEntrySize); + } + return this.layoutCache[inputEntrySize]; + }; + WebGPUBackend.prototype.runWebGPUProgram = function (program, inputs, outputDtype, programUniforms, output) { + var _this = this; + if (!output) { + output = this.makeTensorInfo(program.outputShape, outputDtype); + if (tf.util.sizeFromShape(output.shape) === 0) { + // Short-circuit the computation since the result is empty (has 0 in its + // shape). + var outData = this.tensorMap.get(output.dataId); + outData.values = + tf.util.getTypedArrayFromDType(output.dtype, 0); + return output; + } + this.uploadToGPU(output.dataId); + } + program.dispatch = reshapeDispatch(this.device, program); + // There are five kinds of uniforms: NAN, shapes, shape strides, program + // size, program defined uniforms. + var uniformsWithType = [{ type: 'float32', data: [NaN] }]; + var bufferShapes = inputs.concat(output).map(function (d) { return d.shape; }); + var uniformsType = 'int32'; + bufferShapes.map(function (d) { + uniformsWithType.push({ type: uniformsType, data: d }); + }); + var strides = tf.util.computeStrides(output.shape); + uniformsWithType.push({ type: uniformsType, data: strides }); + if (program.size) { + var size = tf.util.sizeFromShape(program.outputShape); + uniformsWithType.push({ type: uniformsType, data: [program.isVec4 ? size / 4 : size] }); + } + if (programUniforms) { + uniformsWithType = __spread(uniformsWithType, programUniforms); + } + var uniforms = this.makeUniforms(uniformsWithType); + var inputsData = inputs.map(function (input, i) { + if (input.dtype === 'complex64') { + throw new Error("GPGPUProgram does not support complex64 input. For complex64 " + + "dtypes, please separate the program into real and imaginary " + + "parts."); + } + _this.uploadToGPU(input.dataId); + return { + // Returning dtype from tensorMap because it reflects dtype + // of underlying buffer, rather than abstract dtype. 
+ dtype: _this.tensorMap.get(input.dataId).dtype, + shape: input.shape, + name: program.variableNames[i] + }; + }); + var bufferTypes = inputsData.map(function (d) { return d.dtype; }).concat(output.dtype); + var broadcastDims = inputsData.map(function (d) { return tf.backend_util.getBroadcastDims(d.shape, output.shape); }); + var inputShapesEqualsOutShape = inputsData.map(function (d) { return tf.util.arraysEqual(d.shape, output.shape); }).join('_'); + var broadcastDimsKey = broadcastDims.map(function (d) { return d.join('_'); }).join(';'); + var key = makeShaderKey(program, bufferShapes, bufferTypes, broadcastDimsKey, inputShapesEqualsOutShape); + var _b = this.getCachedOrCreateLayout(program.variableNames.length), bindGroupLayout = _b.bindGroupLayout, pipelineLayout = _b.pipelineLayout; + var pipeline = this.getAndSavePipeline(key, function () { + return compileProgram(_this.device, program, pipelineLayout, inputsData, output); + }); + var shouldTimeProgram = this.activeTimers != null; + // Creating bind groups on the fly should never be a bottleneck. + var bg = makeBindGroup(this.device, bindGroupLayout, inputs.map(function (t) { return _this.tensorToBinding(t); }), this.tensorToBinding(output), uniforms); + this.ensureCommandEncoderReady(); + var pass = this.getComputePass(); + if (shouldTimeProgram) { + if (this.supportTimeQuery) { + // tslint:disable-next-line:no-any + pass.writeTimestamp(this.querySet, 0); + } + } + pass.setPipeline(pipeline); + pass.setBindGroup(0, bg); + pass.dispatch(program.dispatch[0], program.dispatch[1], program.dispatch[2]); + if (shouldTimeProgram) { + if (this.supportTimeQuery) { + // tslint:disable-next-line:no-any + pass.writeTimestamp(this.querySet, 1); + } + } + this.dispatchNumberInEncoder++; + inputs.forEach(function (input) { + _this.commandQueueOwnedIds.add(input.dataId); + }); + this.commandQueueOwnedIds.add(output.dataId); + if (tf.env().get('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE') <= this.dispatchNumberInEncoder) { + this.submitQueue(); + } + if (shouldTimeProgram) { + this.activeTimers.push({ + name: program.constructor.name, + query: this.getQueryTime(this.querySet) + }); + } + return output; + }; + WebGPUBackend.prototype.getFromPixelTextureLayout = function (useImport) { + if (useImport) { + if (this.fromPixelImportTextureLayout === null) { + this.fromPixelImportTextureLayout = + this.createFromPixelTextureLayout(true /* useImport */); + } + return this.fromPixelImportTextureLayout; + } + if (this.fromPixelTextureLayout === null) { + this.fromPixelTextureLayout = + this.createFromPixelTextureLayout(false /* useImport */); + } + return this.fromPixelTextureLayout; + }; + WebGPUBackend.prototype.createFromPixelTextureLayout = function (useImport) { + var bindGroupLayoutEntries = []; + // Output buffer binding layout. + bindGroupLayoutEntries.push({ + binding: 0, + visibility: GPUShaderStage.COMPUTE, + buffer: { type: 'storage' } + }); + // Input texture binding layout. + if (useImport) { + bindGroupLayoutEntries.push({ + binding: 1, + visibility: GPUShaderStage.COMPUTE, + externalTexture: {}, + }); + } + else { + bindGroupLayoutEntries.push({ binding: 1, visibility: GPUShaderStage.COMPUTE, texture: {} }); + } + // Uniform buffer binding layout. 
+ bindGroupLayoutEntries.push({ binding: 2, visibility: GPUShaderStage.COMPUTE, buffer: {} }); + var fromPixelBindGroupLayout = this.device.createBindGroupLayout({ entries: bindGroupLayoutEntries }); + var fromPixelPipelineLayout = this.device.createPipelineLayout({ bindGroupLayouts: [fromPixelBindGroupLayout] }); + return { + bindGroupLayout: fromPixelBindGroupLayout, + pipelineLayout: fromPixelPipelineLayout + }; + }; + WebGPUBackend.prototype.copyExternalImageToTexture = function (externalImage, outShape) { + var textureUsage = GPUTextureUsage.COPY_DST | + GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING; + var textureFormat = 'rgba8unorm'; + var texture = this.textureManager.acquireTexture(outShape[1], outShape[0], textureFormat, textureUsage); + var externalResource = texture.createView(); + this.queue.copyExternalImageToTexture({ source: externalImage }, { texture: texture }, [outShape[1], outShape[0]]); + var textureInfo = { + width: outShape[1], + height: outShape[0], + format: textureFormat, + usage: textureUsage, + texture: texture + }; + this.textureDisposalQueue.push(textureInfo); + return externalResource; + }; + WebGPUBackend.prototype.runFromPixelsProgram = function (program, outShape, programUniforms, useImport, externalImage) { + var _this = this; + program.dispatch = reshapeDispatch(this.device, program); + var output = this.makeTensorInfo(outShape, 'int32'); + if (tf.util.sizeFromShape(output.shape) === 0) { + // Short-circuit the computation since the result is empty (has 0 in its + // shape). + var outData = this.tensorMap.get(output.dataId); + outData.values = + tf.util.getTypedArrayFromDType(output.dtype, 0); + return output; + } + this.uploadToGPU(output.dataId); + var key = makeShaderKey(program, [output.shape]); + var layout = this.getFromPixelTextureLayout(useImport); + var pipeline = this.getAndSavePipeline(key, function () { + return compileProgram(_this.device, program, layout.pipelineLayout, [], output, true); + }); + var externalResource; + if (useImport) { + var externalTextureDescriptor = { + source: externalImage + }; + externalResource = + this.device.importExternalTexture(externalTextureDescriptor); + } + else { + externalResource = this.copyExternalImageToTexture(externalImage, output.shape); + } + var binding = this.tensorToBinding(output); + var uniforms = this.makeUniforms(programUniforms); + var bindGroup = this.device.createBindGroup({ + layout: layout.bindGroupLayout, + entries: [ + { + binding: 0, + resource: { + // tslint:disable-next-line: no-unnecessary-type-assertion + buffer: binding.buffer, + } + }, + { + binding: 1, + resource: externalResource, + }, + { + binding: 2, + resource: { + // tslint:disable-next-line: no-unnecessary-type-assertion + buffer: uniforms.buffer, + } + } + ], + }); + this.ensureCommandEncoderReady(); + var pass = this.getComputePass(); + var shouldTimeProgram = this.activeTimers != null; + if (shouldTimeProgram) { + if (this.supportTimeQuery) { + // tslint:disable-next-line:no-any + pass.writeTimestamp(this.querySet, 0); + } + } + pass.setPipeline(pipeline); + pass.setBindGroup(0, bindGroup); + pass.dispatch(program.dispatch[0], program.dispatch[1], program.dispatch[2]); + if (shouldTimeProgram) { + if (this.supportTimeQuery) { + // tslint:disable-next-line:no-any + pass.writeTimestamp(this.querySet, 1); + } + } + this.commandQueueOwnedIds.add(output.dataId); + this.dispatchNumberInEncoder++; + if (tf.env().get('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE') <= this.dispatchNumberInEncoder) { + 
this.submitQueue(); + } + if (shouldTimeProgram) { + this.activeTimers.push({ + name: program.constructor.name, + query: this.getQueryTime(this.querySet) + }); + } + return output; + }; + WebGPUBackend.prototype.getTimeFromQuerySet = function (querySet) { + return __awaiter(this, void 0, void 0, function () { + var queryBuffer, dst, arrayBuf, timeElapsedNanos; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + queryBuffer = this.acquireBuffer(16, GPUBufferUsage.COPY_SRC | GPUBufferUsage.QUERY_RESOLVE); + dst = this.acquireBuffer(16, GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST); + this.ensureCommandEncoderReady(); + this.ensureComputePassEnded(); + this.currentCommandEncoder.resolveQuerySet(querySet, 0, 2, queryBuffer, 0); + this.currentCommandEncoder.copyBufferToBuffer(queryBuffer, 0, dst, 0, 16); + this.submitQueue(); + return [4 /*yield*/, dst.mapAsync(GPUMapMode.READ)]; + case 1: + _b.sent(); + arrayBuf = new BigUint64Array(dst.getMappedRange()); + timeElapsedNanos = Number((arrayBuf[1] - arrayBuf[0])); + dst.unmap(); + this.bufferManager.releaseBuffer(dst, 16, GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST); + this.bufferManager.releaseBuffer(queryBuffer, 16, GPUBufferUsage.COPY_SRC | GPUBufferUsage.QUERY_RESOLVE); + // Return milliseconds. + return [2 /*return*/, timeElapsedNanos / 1000000]; + } + }); + }); + }; + WebGPUBackend.prototype.shouldExecuteOnCPU = function (inputs, sizeThreshold) { + var _this = this; + if (sizeThreshold === void 0) { sizeThreshold = CPU_HANDOFF_SIZE_THRESHOLD; } + return tf.env().getBool('WEBGPU_CPU_FORWARD') && + inputs.every(function (input) { return _this.tensorMap.get(input.dataId).bufferInfo.buffer == null && + tf.util.sizeFromShape(input.shape) < sizeThreshold; }); + }; + WebGPUBackend.prototype.numDataIds = function () { + return this.tensorMap.numDataIds() - this.tensorDisposalQueue.length; + }; + WebGPUBackend.prototype.dispose = function () { + if (this.disposed) { + return; + } + this.bufferManager.dispose(); + this.textureManager.dispose(); + this.disposed = true; + }; + return WebGPUBackend; + }(tf.KernelBackend)); + WebGPUBackend.nextDataId = 0; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + var webgpu = { + __proto__: null, + webgpu_util: webgpu_util, + WebGPUBackend: WebGPUBackend + }; + + var _this = undefined; + if (isWebGPUSupported()) { + tf.registerBackend('webgpu', function () { return __awaiter(_this, void 0, void 0, function () { + var gpuDescriptor, adapter, adapterLimits, deviceDescriptor, supportTimeQuery, device; + return __generator(this, function (_a) { + switch (_a.label) { + case 0: + // Remove it once we figure out how to correctly read the tensor data + // before the tensor is disposed in profiling mode. 
+ tf.env().set('CHECK_COMPUTATION_FOR_ERRORS', false); + gpuDescriptor = { + powerPreference: tf.env().get('WEBGPU_USE_LOW_POWER_GPU') ? + 'low-power' : + 'high-performance' + }; + return [4 /*yield*/, navigator.gpu.requestAdapter(gpuDescriptor)]; + case 1: + adapter = _a.sent(); + adapterLimits = adapter.limits; + deviceDescriptor = {}; + supportTimeQuery = adapter.features.has('timestamp-query'); + deviceDescriptor.requiredLimits = { + 'maxComputeWorkgroupStorageSize': adapterLimits.maxComputeWorkgroupStorageSize, + 'maxComputeWorkgroupsPerDimension': adapterLimits.maxComputeWorkgroupsPerDimension, + }; + if (supportTimeQuery) { + deviceDescriptor.requiredFeatures = ['timestamp-query']; + } + else { + console.warn("This device doesn't support timestamp-query extension. " + + "Start Chrome browser with flag " + + "--disable-dawn-features=disallow_unsafe_apis then try again. " + + "Or zero will shown for the kernel time when profiling mode is" + + "enabled. Using performance.now is not workable for webgpu since" + + "it doesn't support synchronously to read data from GPU."); + } + return [4 /*yield*/, adapter.requestDevice(deviceDescriptor)]; + case 2: + device = _a.sent(); + return [2 /*return*/, new WebGPUBackend(device, supportTimeQuery)]; + } + }); + }); }, 3 /*priority*/); + } + + exports.webgpu = webgpu; + + Object.defineProperty(exports, '__esModule', { value: true }); + +}))); +//# sourceMappingURL=tf-backend-webgpu.js.map diff --git a/src/js/third_party/tfjs/tf-backend-webgpu.js.map b/src/js/third_party/tfjs/tf-backend-webgpu.js.map new file mode 100644 index 000000000..26e2ce030 --- /dev/null +++ b/src/js/third_party/tfjs/tf-backend-webgpu.js.map @@ -0,0 +1 @@ +{"version":3,"file":"tf-backend-webgpu.js","sources":["../../../../node_modules/tslib/tslib.es6.js","../../../../tfjs-backend-webgpu/src/flags_webgpu.ts","../../../../tfjs-backend-webgpu/src/binary_op_util.ts","../../../../tfjs-backend-webgpu/src/unary_op_util.ts","../../../../tfjs-backend-webgpu/src/activation_util.ts","../../../../tfjs-backend-webgpu/src/shader_util.ts","../../../../tfjs-backend-webgpu/src/shader_preprocessor.ts","../../../../tfjs-backend-webgpu/src/webgpu_util.ts","../../../../tfjs-backend-webgpu/src/matmul_packed_vec4_webgpu.ts","../../../../tfjs-backend-webgpu/src/matmul_packed_webgpu.ts","../../../../tfjs-backend-webgpu/src/matmul_reduce.ts","../../../../tfjs-backend-webgpu/src/matmul_small_output_size_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Reshape.ts","../../../../tfjs-backend-webgpu/src/kernels/BatchMatMul_impl.ts","../../../../tfjs-backend-webgpu/src/kernels/_FusedMatMul.ts","../../../../tfjs-backend-webgpu/src/binary_op_complex_webgpu.ts","../../../../tfjs-backend-webgpu/src/binary_op_shared_webgpu.ts","../../../../tfjs-backend-webgpu/src/binary_op_vec4_webgpu.ts","../../../../tfjs-backend-webgpu/src/binary_op_webgpu.ts","../../../../tfjs-backend-webgpu/src/binary_ops.ts","../../../../tfjs-backend-webgpu/src/kernels/Identity.ts","../../../../tfjs-backend-webgpu/src/kernels/Complex.ts","../../../../tfjs-backend-webgpu/src/unary_op_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernel_utils/kernel_funcs_utils.ts","../../../../../tfjs-backend-cpu/src/kernels/Abs.ts","../../../../../tfjs-backend-cpu/src/utils/binary_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Add.ts","../../../../../tfjs-backend-cpu/src/utils/unary_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Ceil.ts","../../../../../tfjs-backend-cpu/src/kernels/Concat_impl.ts","../../../../../tfjs-backend-cpu/src/
kernels/Equal.ts","../../../../../tfjs-backend-cpu/src/kernels/Exp.ts","../../../../../tfjs-backend-cpu/src/kernels/Expm1.ts","../../../../../tfjs-backend-cpu/src/kernels/Floor.ts","../../../../../tfjs-backend-cpu/src/kernels/GatherNd_Impl.ts","../../../../../tfjs-backend-cpu/src/kernels/GatherV2_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Greater.ts","../../../../../tfjs-backend-cpu/src/kernels/GreaterEqual.ts","../../../../../tfjs-backend-cpu/src/kernels/Less.ts","../../../../../tfjs-backend-cpu/src/kernels/LessEqual.ts","../../../../../tfjs-backend-cpu/src/kernels/Log.ts","../../../../../tfjs-backend-cpu/src/kernels/Max_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Maximum.ts","../../../../../tfjs-backend-cpu/src/kernels/Minimum.ts","../../../../../tfjs-backend-cpu/src/kernels/Multiply.ts","../../../../../tfjs-backend-cpu/src/kernels/Neg.ts","../../../../../tfjs-backend-cpu/src/kernels/NotEqual.ts","../../../../../tfjs-backend-cpu/src/kernels/Transpose_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Prod.ts","../../../../../tfjs-backend-cpu/src/kernels/Range_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Rsqrt.ts","../../../../../tfjs-backend-cpu/src/kernels/Slice.ts","../../../../../tfjs-backend-cpu/src/kernels/StridedSlice_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/StringNGrams_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/Sub.ts","../../../../../tfjs-backend-cpu/src/kernels/Tile_impl.ts","../../../../../tfjs-backend-cpu/src/kernels/TopK_impl.ts","../../../../tfjs-backend-webgpu/src/kernel_utils/shared.ts","../../../../tfjs-backend-webgpu/src/kernels/Abs.ts","../../../../tfjs-backend-webgpu/src/kernels/Add.ts","../../../../tfjs-backend-webgpu/src/addn_packed_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/AddN.ts","../../../../tfjs-backend-webgpu/src/argminmax_webgpu.ts","../../../../tfjs-backend-webgpu/src/transpose_shared_webgpu.ts","../../../../tfjs-backend-webgpu/src/transpose_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Transpose.ts","../../../../tfjs-backend-webgpu/src/kernels/ArgMax.ts","../../../../tfjs-backend-webgpu/src/kernels/ArgMin.ts","../../../../tfjs-backend-webgpu/src/pool2d_webgpu.ts","../../../../tfjs-backend-webgpu/src/pool_filtersizeone_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/AvgPool.ts","../../../../tfjs-backend-webgpu/src/kernels/BatchMatMul.ts","../../../../tfjs-backend-webgpu/src/slice_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Slice.ts","../../../../tfjs-backend-webgpu/src/kernels/BatchToSpaceND.ts","../../../../tfjs-backend-webgpu/src/kernels/NotEqual.ts","../../../../tfjs-backend-webgpu/src/kernels/Real.ts","../../../../tfjs-backend-webgpu/src/kernel_utils/int.ts","../../../../tfjs-backend-webgpu/src/kernels/Cast.ts","../../../../tfjs-backend-webgpu/src/kernels/Ceil.ts","../../../../tfjs-backend-webgpu/src/clip_vec4_webgpu.ts","../../../../tfjs-backend-webgpu/src/clip_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/ClipByValue.ts","../../../../tfjs-backend-webgpu/src/concat_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Imag.ts","../../../../tfjs-backend-webgpu/src/kernels/Concat_impl.ts","../../../../tfjs-backend-webgpu/src/kernels/Concat.ts","../../../../tfjs-backend-webgpu/src/conv2d_mm_vec4_webgpu.ts","../../../../tfjs-backend-webgpu/src/conv2d_mm_webgpu.ts","../../../../tfjs-backend-webgpu/src/conv2d_naive_webgpu.ts","../../../../tfjs-backend-webgpu/src/im2col_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Conv2D_impl.ts","../../..
/../tfjs-backend-webgpu/src/kernels/Conv2D.ts","../../../../tfjs-backend-webgpu/src/conv_backprop_mm_webgpu.ts","../../../../tfjs-backend-webgpu/src/conv_backprop_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Conv2DBackpropInput.ts","../../../../tfjs-backend-webgpu/src/kernels/Cos.ts","../../../../tfjs-backend-webgpu/src/kernels/Cosh.ts","../../../../tfjs-backend-webgpu/src/crop_and_resize_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/CropAndResize.ts","../../../../tfjs-backend-webgpu/src/cum_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Cum_impl.ts","../../../../tfjs-backend-webgpu/src/kernels/Cumprod.ts","../../../../tfjs-backend-webgpu/src/kernels/Cumsum.ts","../../../../tfjs-backend-webgpu/src/depth_to_space_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/DepthToSpace.ts","../../../../tfjs-backend-webgpu/src/depthwise_conv2d_3x3_webgpu.ts","../../../../tfjs-backend-webgpu/src/depthwise_conv2d_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/DepthwiseConv2dNative.ts","../../../../tfjs-backend-webgpu/src/kernels/Multiply.ts","../../../../tfjs-backend-webgpu/src/reduce_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernel_utils/reduce.ts","../../../../tfjs-backend-webgpu/src/kernels/Sum.ts","../../../../tfjs-backend-webgpu/src/kernels/Einsum.ts","../../../../tfjs-backend-webgpu/src/kernels/Elu.ts","../../../../tfjs-backend-webgpu/src/kernels/Equal.ts","../../../../tfjs-backend-webgpu/src/kernels/Exp.ts","../../../../tfjs-backend-webgpu/src/kernels/ExpandDims.ts","../../../../tfjs-backend-webgpu/src/kernels/Expm1.ts","../../../../tfjs-backend-webgpu/src/fill_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Fill.ts","../../../../tfjs-backend-webgpu/src/flip_left_right_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/FlipLeftRight.ts","../../../../tfjs-backend-webgpu/src/kernels/Floor.ts","../../../../tfjs-backend-webgpu/src/kernels/FloorDiv.ts","../../../../tfjs-backend-webgpu/src/from_pixels_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/FromPixels.ts","../../../../tfjs-backend-webgpu/src/batchnorm_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/FusedBatchNorm.ts","../../../../tfjs-backend-webgpu/src/kernels/FusedConv2D.ts","../../../../tfjs-backend-webgpu/src/kernels/FusedDepthwiseConv2D.ts","../../../../tfjs-backend-webgpu/src/gather_nd_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/GatherNd.ts","../../../../tfjs-backend-webgpu/src/gather_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/GatherV2.ts","../../../../tfjs-backend-webgpu/src/kernels/Greater.ts","../../../../tfjs-backend-webgpu/src/kernels/GreaterEqual.ts","../../../../tfjs-backend-webgpu/src/kernels/LeakyRelu.ts","../../../../tfjs-backend-webgpu/src/kernels/Less.ts","../../../../tfjs-backend-webgpu/src/kernels/LessEqual.ts","../../../../tfjs-backend-webgpu/src/kernels/Log.ts","../../../../tfjs-backend-webgpu/src/kernels/LogicalAnd.ts","../../../../tfjs-backend-webgpu/src/kernels/LogicalNot.ts","../../../../tfjs-backend-webgpu/src/kernels/Max.ts","../../../../tfjs-backend-webgpu/src/kernels/Maximum.ts","../../../../tfjs-backend-webgpu/src/kernels/MaxPool.ts","../../../../tfjs-backend-webgpu/src/kernels/Mean.ts","../../../../tfjs-backend-webgpu/src/kernels/Min.ts","../../../../tfjs-backend-webgpu/src/kernels/Minimum.ts","../../../../tfjs-backend-webgpu/src/mirror_pad_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/MirrorPad.ts","../../../../tfjs-backend-webgpu/src/kernels/Neg.ts","../../../../tfjs-backend-webgpu/src/kernels/NonMax
SuppressionV3.ts","../../../../tfjs-backend-webgpu/src/kernels/NonMaxSuppressionV5.ts","../../../../tfjs-backend-webgpu/src/kernels/ZerosLike.ts","../../../../tfjs-backend-webgpu/src/kernels/OnesLike.ts","../../../../tfjs-backend-webgpu/src/kernels/Pack.ts","../../../../tfjs-backend-webgpu/src/pad_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/PadV2.ts","../../../../tfjs-backend-webgpu/src/kernels/Pow.ts","../../../../tfjs-backend-webgpu/src/kernels/Prelu.ts","../../../../tfjs-backend-webgpu/src/kernels/Prod.ts","../../../../tfjs-backend-webgpu/src/kernels/Range.ts","../../../../tfjs-backend-webgpu/src/kernels/RealDiv.ts","../../../../tfjs-backend-webgpu/src/kernels/Relu.ts","../../../../tfjs-backend-webgpu/src/kernels/Relu6.ts","../../../../tfjs-backend-webgpu/src/resize_bilinear_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/ResizeBilinear.ts","../../../../tfjs-backend-webgpu/src/resize_nearest_neighbor_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/ResizeNearestNeighbor.ts","../../../../tfjs-backend-webgpu/src/rotate_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/RotateWithOffset.ts","../../../../tfjs-backend-webgpu/src/kernels/Rsqrt.ts","../../../../tfjs-backend-webgpu/src/scatter_optimized_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/ScatterNd.ts","../../../../tfjs-backend-webgpu/src/select_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Select.ts","../../../../tfjs-backend-webgpu/src/kernels/Sigmoid.ts","../../../../tfjs-backend-webgpu/src/kernels/Sin.ts","../../../../tfjs-backend-webgpu/src/kernels/Sinh.ts","../../../../tfjs-backend-webgpu/src/kernels/Sub.ts","../../../../tfjs-backend-webgpu/src/kernels/Softmax.ts","../../../../tfjs-backend-webgpu/src/kernels/SpaceToBatchND.ts","../../../../tfjs-backend-webgpu/src/scatter_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/SparseToDense.ts","../../../../tfjs-backend-webgpu/src/kernels/SplitV.ts","../../../../tfjs-backend-webgpu/src/kernels/Sqrt.ts","../../../../tfjs-backend-webgpu/src/kernels/Square.ts","../../../../tfjs-backend-webgpu/src/kernels/SquaredDifference.ts","../../../../tfjs-backend-webgpu/src/strided_slice_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/StridedSlice.ts","../../../../tfjs-backend-webgpu/src/kernels/StringNGrams.ts","../../../../tfjs-backend-webgpu/src/kernels/Tanh.ts","../../../../tfjs-backend-webgpu/src/tile_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Tile.ts","../../../../tfjs-backend-webgpu/src/top_k_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/TopK.ts","../../../../tfjs-backend-webgpu/src/transform_webgpu.ts","../../../../tfjs-backend-webgpu/src/kernels/Transform.ts","../../../../tfjs-backend-webgpu/src/kernels/Unpack.ts","../../../../tfjs-backend-webgpu/src/register_all_kernels.ts","../../../../tfjs-backend-webgpu/src/buffer_manager.ts","../../../../tfjs-backend-webgpu/src/texture_manager.ts","../../../../tfjs-backend-webgpu/src/webgpu_program.ts","../../../../tfjs-backend-webgpu/src/backend_webgpu.ts","../../../../tfjs-backend-webgpu/src/webgpu.ts","../../../../tfjs-backend-webgpu/src/index.ts"],"sourcesContent":["/*! 
*****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport function __createBinding(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n}\r\n\r\nexport function __exportStar(m, exports) {\r\n for (var p in m) if (p !== \"default\" && !exports.hasOwnProperty(p)) exports[p] = m[p];\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n};\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];\r\n result.default = mod;\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, privateMap) {\r\n if (!privateMap.has(receiver)) {\r\n throw new TypeError(\"attempted to get private field on non-instance\");\r\n }\r\n return privateMap.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, privateMap, value) {\r\n if (!privateMap.has(receiver)) {\r\n throw new TypeError(\"attempted to set private field on non-instance\");\r\n }\r\n privateMap.set(receiver, value);\r\n return value;\r\n}\r\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '@tensorflow/tfjs-core';\n\nconst ENV = env();\n\n/** The batched dispatching calls size in the device queue. */\nENV.registerFlag('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE', () => 15);\n\n/**\n * Whether we forward execution to the CPU backend if tensors are small and\n * reside on the CPU.\n */\nENV.registerFlag('WEBGPU_CPU_FORWARD', () => true);\n\n/**\n * Thread register block size for matmul kernel.\n */\nENV.registerFlag('WEBGPU_MATMUL_WORK_PER_THREAD', () => 4);\n\n/**\n * Whether to use conv2d_naive which directly implement the conv2d logic rather\n * than using a matmul to simulate.\n * Note that NCHW is not supported.\n */\nENV.registerFlag('WEBGPU_USE_NAIVE_CONV2D', () => false);\n\n/**\n * Whether to use conv2dTranspose_naive which directly implement the\n * conv2dTranspose logic rather than using a matmul to simulate.\n */\nENV.registerFlag('WEBGPU_USE_NAIVE_CONV2D_TRANSPOSE', () => false);\n\n/**\n * Whether we will run im2col as a separate shader for convolution.\n * Note that NCHW is not supported.\n */\nENV.registerFlag('WEBGPU_CONV_SEPARATE_IM2COL_SHADER', () => false);\n\n/**\n * Whether we use low power GPU. Otherwise, a high performance GPU will be\n * requested.\n */\nENV.registerFlag('WEBGPU_USE_LOW_POWER_GPU', () => false);\n\n/**\n * Threshold for input tensor size that determines whether WebGPU backend will\n * delegate computation to CPU.\n *\n * Default value is 1000.\n */\nENV.registerFlag('WEBGPU_CPU_HANDOFF_SIZE_THRESHOLD', () => 1000);\n\n/**\n * Whether to use a dummy canvas to make profiling tools like PIX work with\n * TFJS webgpu backend.\n */\nENV.registerFlag('WEBGPU_USE_PROFILE_TOOL', () => false);\n\n/**\n * Whether to use import API.\n */\nENV.registerFlag('WEBGPU_USE_IMPORT', () => false);\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport enum BinaryOpType {\n MUL,\n ADD,\n SUB,\n DIV,\n EQUAL,\n GREATER,\n GREATER_EQUAL,\n LESS,\n LESS_EQUAL,\n LOGICAL_AND,\n NOT_EQUAL,\n SQUARED_DIFFERENCE,\n INT_DIV,\n POW,\n PRELU,\n MAX,\n MIN,\n COMPLEX_MULTIPLY_REAL,\n COMPLEX_MULTIPLY_IMAG\n}\n\nconst ADD = 'return a + b;';\n// (Ar + Ai)(Br + Bi) =\n// ArBr + ArBi + AiBr + AiBi = ArBr - AB + ArBi + AiBr\n// Yr = ArBr - AB\n// Yi = ArBi + AiBr\nconst COMPLEX_MULTIPLY_REAL = 'return areal * breal - aimag * bimag;';\nconst COMPLEX_MULTIPLY_IMAG = 'return areal * bimag + aimag * breal;';\nconst DIV = 'return a / b;';\nconst MUL = 'return a * b;';\nconst SQUARED_DIFFERENCE = 'return (a - b) * (a - b);';\nconst SUB = 'return a - b;';\nconst EQUAL = 'return f32(a == b);';\nconst EQUAL_VEC4 = 'return vec4(a == b);';\nconst GREATER = 'return f32(a > b);';\nconst GREATER_VEC4 = 'return vec4(a > b);';\nconst GREATER_EQUAL = 'return f32(a >= b);';\nconst GREATER_EQUAL_VEC4 = 'return vec4(a >= b);';\nconst LESS = 'return f32(a < b);';\nconst LESS_VEC4 = 'return vec4(a < b);';\nconst LESS_EQUAL = 'return f32(a <= b);';\nconst LESS_EQUAL_VEC4 = 'return vec4(a <= b);';\nconst LOGICAL_AND = 'return f32(f32(a) >= 1.0 && f32(b) >= 1.0);';\nconst LOGICAL_AND_VEC4 = `return (vec4(a >= vec4(1.0)) *\n vec4(b >= vec4(1.0)));`;\nconst CHECK_NAN_SNIPPET = `\n if (isnan(a)) { return a; }\n if (isnan(b)) { return b; }\n `;\nconst CHECK_NAN_SNIPPET_VEC4 = `\n if (isNaN.r) {\n resultTemp.r = uniforms.NAN;\n }\n if (isNaN.g) {\n resultTemp.g = uniforms.NAN;\n }\n if (isNaN.b) {\n resultTemp.b = uniforms.NAN;\n }\n if (isNaN.a) {\n resultTemp.a = uniforms.NAN;\n }\n `;\nconst INT_DIV = `\n let s = sign(a) * sign(b);\n let ia = i32(round(a));\n let ib = i32(round(b));\n return f32(idiv(ia, ib, s));\n `;\n\nconst INT_DIV_VEC4 = `\n let ia = vec4(round(a));\n let ib = vec4(round(b));\n let cond = ib != vec4(0);\n var resultTemp = vec4(0);\n let s = sign(a) * sign(b);\n\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n if (cond[0]) {\n resultTemp[0] = idiv(ia[0], ib[0], s[0]);\n }\n if (cond[1]) {\n resultTemp[1] = idiv(ia[1], ib[1], s[1]);\n }\n if (cond[2]) {\n resultTemp[2] = idiv(ia[2], ib[2], s[2]);\n }\n if (cond[3]) {\n resultTemp[3] = idiv(ia[3], ib[3], s[3]);\n }\n return vec4(resultTemp);\n `;\n\nconst NOT_EQUAL = 'return f32(a != b);';\nconst NOT_EQUAL_VEC4 = 'return vec4(a != b);';\nconst POW = `\n if(a < 0.0 && floor(b) < b) {\n return uniforms.NAN;\n }\n if (b == 0.0) {\n return 1.0;\n }\n if (round(abs(b) % 2.0) != 1.0) {\n return pow(abs(a), b);\n }\n return sign(a) * pow(abs(a), b);\n `;\nconst POW_VEC4 = `\n let isModRound1Bool = vec4(round(abs(b) % vec4(2.0))) == vec4(1);\n let isModRound1 = vec4(isModRound1Bool);\n let multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1);\n var resultTemp = multiplier * pow(abs(a), b);\n\n // Ensure that a^0 
= 1, including 0^0 = 1 as this correspond to TF and JS\n let isExpZero = b == vec4(0.0);\n if (isExpZero.r) {\n resultTemp.r = 1.0;\n }\n if (isExpZero.g) {\n resultTemp.g = 1.0;\n }\n if (isExpZero.b) {\n resultTemp.b = 1.0;\n }\n if (isExpZero.a) {\n resultTemp.a = 1.0;\n }\n let isNaN = a < vec4(0.0) & floor(b) < b;\n ${CHECK_NAN_SNIPPET_VEC4}\n return resultTemp;\n `;\n\nconst PRELU = `if (a < 0.0) { return b * a; } return a;`;\nconst PRELU_VEC4 = `\n let aLessThanZero = vec4(a < vec4(0.0));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n `;\n\nfunction getMinMaxString(op: string, useVec4: boolean) {\n const checkNanSnippet = useVec4 ? CHECK_NAN_SNIPPET_VEC4 : CHECK_NAN_SNIPPET;\n return useVec4 ? `\n var resultTemp = vec4(${op}(a, b));\n let isNaN = isnanVec4(a) | isnanVec4(b);\n ` + checkNanSnippet +\n `\n return resultTemp;\n ` :\n checkNanSnippet + `\n return ${op}(a, b);\n `;\n}\n\nexport function getBinaryOpString(\n type: BinaryOpType, useVec4?: boolean): string {\n switch (type) {\n case BinaryOpType.MUL:\n return MUL;\n case BinaryOpType.ADD:\n return ADD;\n case BinaryOpType.SUB:\n return SUB;\n case BinaryOpType.DIV:\n return DIV;\n case BinaryOpType.EQUAL:\n return useVec4 ? EQUAL_VEC4 : EQUAL;\n case BinaryOpType.GREATER:\n return useVec4 ? GREATER_VEC4 : GREATER;\n case BinaryOpType.GREATER_EQUAL:\n return useVec4 ? GREATER_EQUAL_VEC4 : GREATER_EQUAL;\n case BinaryOpType.LESS:\n return useVec4 ? LESS_VEC4 : LESS;\n case BinaryOpType.LESS_EQUAL:\n return useVec4 ? LESS_EQUAL_VEC4 : LESS_EQUAL;\n case BinaryOpType.LOGICAL_AND:\n return useVec4 ? LOGICAL_AND_VEC4 : LOGICAL_AND;\n case BinaryOpType.NOT_EQUAL:\n return useVec4 ? NOT_EQUAL_VEC4 : NOT_EQUAL;\n case BinaryOpType.SQUARED_DIFFERENCE:\n return SQUARED_DIFFERENCE;\n case BinaryOpType.INT_DIV:\n return useVec4 ? INT_DIV_VEC4 : INT_DIV;\n case BinaryOpType.PRELU:\n return useVec4 ? PRELU_VEC4 : PRELU;\n case BinaryOpType.MAX:\n return getMinMaxString('max', useVec4);\n case BinaryOpType.MIN:\n return getMinMaxString('min', useVec4);\n case BinaryOpType.POW:\n return useVec4 ? POW_VEC4 : POW;\n case BinaryOpType.COMPLEX_MULTIPLY_REAL:\n return COMPLEX_MULTIPLY_REAL;\n case BinaryOpType.COMPLEX_MULTIPLY_IMAG:\n return COMPLEX_MULTIPLY_IMAG;\n default:\n throw new Error(`BinaryType ${type} is not implemented!`);\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport enum UnaryOpType {\n ABS,\n CEIL,\n COS,\n COSH,\n ELU,\n EXP,\n EXPM1,\n FLOOR,\n LINEAR,\n LOG,\n LOGICAL_NOT,\n NEG,\n RELU,\n RELU6,\n LEAKYRELU,\n RSQRT,\n SIN,\n SINH,\n SIGMOID,\n SQRT,\n SQUARE,\n TANH,\n TO_INT\n}\n\nconst ABS = `return abs(a);`;\nconst CEIL = `return ceil(a);`;\nconst COS = `return cos(a);`;\nconst COSH = `\n let e2x = exp(-a);\n return (e2x + 1.0 / e2x) / 2.0;\n`;\nconst EXPM1 = `return exp(a) - 1.0;`;\nconst ELU = `if (a >= 0.0) { return a; } return (exp(a) - 1.0);`;\nconst ELU_VEC4 = `\n var resFloat = exp(a) - vec4(1.0);\n if (a.r >= 0.0) {\n resFloat.r = a.r;\n }\n if (a.g >= 0.0) {\n resFloat.g = a.g;\n }\n if (a.b >= 0.0) {\n resFloat.b = a.b;\n }\n if (a.a >= 0.0) {\n resFloat.a = a.a;\n }\n return resFloat;\n`;\nconst EXP = `return exp(a);`;\nconst FLOOR = `return floor(a);`;\nconst LINEAR = `return a;`;\nconst LOG = `if (a < 0.0) { return 1.0/0.0; }\n return log(a);`;\nconst LOGICAL_NOT = `return f32(!(a >= 1.0));`;\nconst NEG = `return -a;`;\nconst LEAKYRELU = `if (a < 0.0) { return uniforms.alpha * a; } return a;`;\nconst LEAKYRELU_VEC4 = `\n let aLessThanZero = vec4(a < vec4(0.0));\n return (aLessThanZero * (uniforms.alpha * a)) + ((vec4(1.0) - aLessThanZero) * a);\n`;\nconst RELU = `if(a < 0.0) { return 0.0; } return a;`;\nconst RELU6 = 'return clamp(a, 0.0, 6.0);';\nconst RELU6_VEC4 =\n 'return clamp(a, vec4(0.0, 0.0, 0.0, 0.0), vec4(6.0, 6.0, 6.0, 6.0));';\nconst RELU_VEC4 = `\n var resFloat = a * vec4(a >= vec4(0.0));\n let isNaN = isnanVec4(a);\n\n if (isNaN.r) {\n resFloat.r = a.r;\n }\n if (isNaN.g) {\n resFloat.g = a.g;\n }\n if (isNaN.b) {\n resFloat.b = a.b;\n }\n if (isNaN.a) {\n resFloat.a = a.a;\n }\n return resFloat;\n`;\nconst RSQRT = `return 1.0/sqrt(a);`;\nconst SIGMOID = `return 1.0 / (1.0 + exp(-1.0 * a));`;\nconst SIN = `return sin(a);`;\nconst SINH = `\n let e2x = exp(a);\n return (e2x - 1.0 / e2x) / 2.0;\n`;\nconst SQRT = `return sqrt(a);`;\nconst SQUARE = `return a * a;`;\nconst TANH = `\n let e2x = exp(-2.0 * abs(a));\n return sign(a) * (1.0 - e2x) / (1.0 + e2x);\n`;\nconst TO_INT = `return f32(i32((a)));`;\n\nexport function getUnaryOpString(type: UnaryOpType, useVec4?: boolean): string {\n switch (type) {\n case UnaryOpType.ABS:\n return ABS;\n case UnaryOpType.COS:\n return COS;\n case UnaryOpType.COSH:\n return COSH;\n case UnaryOpType.CEIL:\n return CEIL;\n case UnaryOpType.ELU:\n return useVec4 ? ELU_VEC4 : ELU;\n case UnaryOpType.EXP:\n return EXP;\n case UnaryOpType.EXPM1:\n return EXPM1;\n case UnaryOpType.FLOOR:\n return FLOOR;\n case UnaryOpType.LINEAR:\n return LINEAR;\n case UnaryOpType.LOG:\n return LOG;\n case UnaryOpType.LOGICAL_NOT:\n return LOGICAL_NOT;\n case UnaryOpType.NEG:\n return NEG;\n case UnaryOpType.LEAKYRELU:\n return useVec4 ? LEAKYRELU_VEC4 : LEAKYRELU;\n case UnaryOpType.RELU:\n return useVec4 ? 
RELU_VEC4 : RELU;\n case UnaryOpType.RELU6:\n return useVec4 ? RELU6_VEC4 : RELU6;\n case UnaryOpType.RSQRT:\n return RSQRT;\n case UnaryOpType.SIGMOID:\n return SIGMOID;\n case UnaryOpType.SIN:\n return SIN;\n case UnaryOpType.SINH:\n return SINH;\n case UnaryOpType.SQRT:\n return SQRT;\n case UnaryOpType.SQUARE:\n return SQUARE;\n case UnaryOpType.TANH:\n return TANH;\n case UnaryOpType.TO_INT:\n return TO_INT;\n\n default:\n throw new Error(`BinaryType ${type} is not implemented!`);\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {BinaryOpType, getBinaryOpString} from './binary_op_util';\nimport {getUnaryOpString, UnaryOpType} from './unary_op_util';\n\nexport function mapActivationToShaderProgram(\n activation: backend_util.Activation, packed = false): string {\n if (activation === null) {\n return null;\n } else if (activation === 'linear') {\n return getUnaryOpString(UnaryOpType.LINEAR);\n } else if (activation === 'relu') {\n return getUnaryOpString(UnaryOpType.RELU, packed);\n } else if (activation === 'elu') {\n return getUnaryOpString(UnaryOpType.ELU, packed);\n } else if (activation === 'relu6') {\n return getUnaryOpString(UnaryOpType.RELU6, packed);\n } else if (activation === 'prelu') {\n return getBinaryOpString(BinaryOpType.PRELU, packed);\n } else if (activation === 'sigmoid') {\n return getUnaryOpString(UnaryOpType.SIGMOID, packed);\n } else if (activation === 'leakyrelu') {\n return getUnaryOpString(UnaryOpType.LEAKYRELU, packed);\n }\n throw new Error(`Activation ${\n activation} has not been implemented for the WebGPU backend.`);\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Generates GLSL that computes strides.\nexport function symbolicallyComputeStrides(\n indicesArr: number[], variableName: string): string[] {\n if (Math.max(...indicesArr) > 3) {\n throw new Error('Cannot symbolically compute strides for rank > 4 tensor.');\n }\n\n const numCoords = indicesArr.length;\n const shape = indicesArr.map(d => `${variableName}[${d}]`);\n const strides = new Array(numCoords - 1);\n strides[numCoords - 2] = shape[numCoords - 1];\n for (let i = numCoords - 3; i >= 0; --i) {\n strides[i] = `(${strides[i + 1]} * ${shape[i + 1]})`;\n }\n\n return strides;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, util} from '@tensorflow/tfjs-core';\nimport {symbolicallyComputeStrides} from './shader_util';\n\nexport function getCoordsDataType(rank: number): string {\n if (rank <= 1) {\n return 'i32';\n } else if (rank === 2) {\n return `vec2`;\n } else if (rank === 3) {\n return `vec3`;\n } else if (rank === 4) {\n return `vec4`;\n } else if (rank === 5) {\n return `vec5`;\n } else if (rank === 6) {\n return `vec6`;\n } else {\n throw Error(`GPU for rank ${rank} is not yet supported`);\n }\n}\n\nexport function getCoordsXYZ(index: number): string {\n if (index === 0) {\n return 'x';\n } else if (index === 1) {\n return 'y';\n } else if (index === 2) {\n return 'z';\n } else if (index === 3) {\n return 'w';\n } else if (index === 4) {\n return 'u';\n } else if (index === 5) {\n return 'v';\n } else {\n throw Error(`Index ${index} is not yet supported`);\n }\n}\n\ntype WGSLDataType = 'f32'|'i32'|'vec4'|'vec4'|'vec4';\nfunction mapToWgslTypes(type: DataType, isVec4: boolean): WGSLDataType|\n DataType {\n if (type === 'float32') {\n return isVec4 ? 'vec4' : 'f32';\n } else if (type === 'int32') {\n return isVec4 ? 'vec4' : 'i32';\n } else if (type === 'bool') {\n // Type 'bool' cannot be used in storage class,\n // https://www.w3.org/TR/WGSL/#host-shareable-types.\n return isVec4 ? 
'vec4' : 'i32';\n }\n\n return type;\n}\n\ninterface ProgramParams {\n dispatchLayout: {x: number[], y?: number[], z?: number[]};\n workGroupSize: [number, number, number];\n variableNames: string[];\n uniforms?: string;\n isVec4?: boolean;\n size?: boolean;\n atomic?: boolean;\n getUserCode: () => string;\n}\n\nexport interface InputInfo {\n dtype: DataType;\n shape: number[];\n name: string;\n}\n\nexport function getWorkGroupSizeString(): string {\n return `\n @stage(compute) @workgroup_size(workGroupSizeX, workGroupSizeY, workGroupSizeZ)\n`;\n}\n\nexport function getMainHeaderString(): string {\n return `\n ${getWorkGroupSizeString()}\n fn main(@builtin(local_invocation_id) LocalId : vec3,\n @builtin(global_invocation_id) GlobalId : vec3,\n @builtin(num_workgroups) NumWorkgroups: vec3) {\n localId = LocalId;\n globalId = GlobalId;\n numWorkgroups = NumWorkgroups;\n`;\n}\n\nexport function getMainHeaderAndGlobalIndexString(): string {\n return `\n ${getMainHeaderString()}\n let index = getGlobalIndex();\n`;\n}\n\nexport function makeShader(\n inputInfo: InputInfo[], outputData: {dtype: DataType, shape: number[]},\n program: ProgramParams, isFromPixel = false): string {\n const prefixSnippets: string[] = [];\n prefixSnippets.push(`\n let workGroupSizeX = ${program.workGroupSize[0]}u;\n let workGroupSizeY = ${program.workGroupSize[1]}u;\n let workGroupSizeZ = ${program.workGroupSize[2]}u;\n\n var localId: vec3;\n var globalId: vec3;\n var numWorkgroups: vec3;\n\n // Only used when the y/z dimension of workgroup size is 1.\n fn getGlobalIndex() -> i32 {\n if (numWorkgroups.y == 1u && numWorkgroups.z == 1u) {\n return i32(globalId.x);\n }\n\n let localInvocationIndex = localId.z * workGroupSizeX * workGroupSizeY +\n localId.y * workGroupSizeX + localId.x;\n let workGroupID = (globalId - localId)/vec3(\n workGroupSizeX, workGroupSizeY, workGroupSizeZ);\n\n return i32((workGroupID.z * numWorkgroups.x * numWorkgroups.y +\n workGroupID.y * numWorkgroups.x + workGroupID.x) *\n (workGroupSizeX * workGroupSizeY * workGroupSizeZ) +\n localInvocationIndex);\n }\n `);\n\n if (isFromPixel === true) {\n prefixSnippets.push(`\n struct Uniform {\n size : i32,\n numChannels : i32,\n outShapeStrides : vec2,\n dispatchSize : vec3,\n };\n\n @group(0) @binding(0) var result: array<${\n mapToWgslTypes(outputData.dtype, program.isVec4)}>;\n @group(0) @binding(2) var uniforms: Uniform;\n `);\n return [\n commonSnippet,\n prefixSnippets.join('\\n'),\n getCoordsFromIndexSnippet(outputData.shape),\n program.getUserCode(),\n ].join('\\n');\n }\n\n let preMemberIsStruct = false;\n let currentMemberIsStruct = false;\n let uniformDeclaration = 'struct Uniforms { NAN : f32, ';\n program.variableNames.forEach((x, i) => {\n const perDataType = getCoordsDataType(inputInfo[i].shape.length);\n if (perDataType === 'vec5' || perDataType === 'vec6') {\n currentMemberIsStruct = true;\n }\n if (preMemberIsStruct || currentMemberIsStruct) {\n uniformDeclaration += `@align(16) `;\n }\n preMemberIsStruct = currentMemberIsStruct;\n uniformDeclaration +=\n `${x.charAt(0).toLowerCase() + x.slice(1)}Shape : ${perDataType}, `;\n });\n const outputDataType = getCoordsDataType(outputData.shape.length);\n currentMemberIsStruct =\n outputDataType === 'vec5' || outputDataType === 'vec6';\n if (preMemberIsStruct || currentMemberIsStruct) {\n uniformDeclaration += `@align(16) `;\n }\n preMemberIsStruct = currentMemberIsStruct;\n uniformDeclaration += `outShape : ${outputDataType}, `;\n const stridesLength = outputData.shape.length - 1;\n const 
stridesDataType = getCoordsDataType(stridesLength);\n currentMemberIsStruct =\n stridesDataType === 'vec5' || stridesDataType === 'vec6';\n if (preMemberIsStruct || currentMemberIsStruct) {\n uniformDeclaration += `@align(16) `;\n }\n preMemberIsStruct = currentMemberIsStruct;\n uniformDeclaration += `\n outShapeStrides: ${stridesDataType}, `;\n\n if (program.size) {\n if (preMemberIsStruct) {\n uniformDeclaration += `@align(16) `;\n }\n preMemberIsStruct = false;\n uniformDeclaration += 'size : i32, ';\n }\n\n if (program.uniforms) {\n if (preMemberIsStruct) {\n uniformDeclaration += `@align(16) `;\n }\n uniformDeclaration += program.uniforms;\n }\n uniformDeclaration += '};';\n\n prefixSnippets.push(uniformDeclaration);\n\n // Output buffer.\n if (program.atomic) {\n prefixSnippets.push(`\n @group(0) @binding(0) var result: array>;\n `);\n } else {\n prefixSnippets.push(`\n @group(0) @binding(0) var result: array<${\n mapToWgslTypes(outputData.dtype, program.isVec4)}>;\n `);\n }\n program.variableNames.forEach((x, i) => {\n prefixSnippets.push(`\n @group(0) @binding(${1 + i}) var ${x}: array<${\n mapToWgslTypes(inputInfo[i].dtype, program.isVec4)}>;\n `);\n });\n\n if (uniformDeclaration !== '') {\n prefixSnippets.push(`\n @group(0) @binding(${\n 1 + program.variableNames.length}) var uniforms: Uniforms;\n `);\n }\n\n const [coordsSnippet, dispatchLayoutRank] =\n getOutputCoordsSnippet(outputData.shape, program.dispatchLayout);\n\n const sources = [\n commonSnippet, prefixSnippets.join('\\n'),\n getCoordsFromIndexSnippet(outputData.shape), coordsSnippet,\n getOutputIndexFromCoordsSnippet(outputData.shape.length)\n ];\n if (!program.atomic) {\n sources.push(\n setOutputSnippet(outputData.shape, outputData.dtype, program.isVec4));\n }\n if (dispatchLayoutRank === outputData.shape.length) {\n // Input snippet is only meaningful when the output isn't getting\n // implicitly reshaped (like it does in conv2d_matmul).\n const inputSnippet = inputInfo\n .map(\n x => getInputSnippet(\n x, outputData.shape, program.isVec4,\n program.dispatchLayout.x.length ===\n outputData.shape.length))\n .join('\\n');\n sources.push(inputSnippet);\n }\n\n sources.push(program.getUserCode());\n const source = sources.join('\\n');\n return source;\n}\n\nconst commonSnippet = `\n struct vec5 {x: i32, y: i32, z: i32, w: i32, u: i32};\n struct vec6 {x: i32, y: i32, z: i32, w: i32, u: i32, v: i32};\n\n // Checks whether coordinates lie within the bounds of the shape.\n fn coordsInBounds2D(coord : vec2, shape : vec2) -> bool {\n return all(coord >= vec2(0)) && all(coord < shape);\n }\n fn coordsInBounds3D(coord : vec3, shape : vec3) -> bool {\n return all(coord >= vec3(0)) && all(coord < shape);\n }\n fn coordsInBounds4D(coord : vec4, shape : vec4) -> bool {\n return all(coord >= vec4(0)) && all(coord < shape);\n }\n\n fn getIndexFromCoords1D(coord : i32, shape : i32) -> i32 {\n return coord;\n }\n fn getIndexFromCoords2D(coords : vec2, shape : vec2) -> i32 {\n return dot(coords, vec2(shape.y, 1));\n }\n fn getIndexFromCoords3D(coords : vec3, shape : vec3) -> i32 {\n return dot(coords, vec3(shape.y * shape.z, shape.z, 1));\n }\n fn getIndexFromCoords4D(coords : vec4, shape : vec4) -> i32 {\n return dot(coords, vec4(\n shape.y * shape.z * shape.w, shape.z * shape.w, shape.w, 1));\n }\n fn getIndexFromCoords5D(coords : vec5, shape : vec5) -> i32 {\n let shapeStrides: vec5 = vec5(shape.y * shape.z * shape.w * shape.u, shape.z * shape.w * shape.u, shape.w * shape.u, shape.u, 1);\n return coords.x*shapeStrides.x + 
coords.y*shapeStrides.y + coords.z*shapeStrides.z + coords.w*shapeStrides.w + coords.u*shapeStrides.u;\n }\n fn getIndexFromCoords6D(coords : vec6, shape : vec6) -> i32 {\n let shapeStrides: vec6 = vec6(shape.y * shape.z * shape.w * shape.u * shape.v, shape.z * shape.w * shape.u * shape.v, shape.w * shape.u * shape.v, shape.u * shape.v, shape.v, 1);\n return coords.x*shapeStrides.x + coords.y*shapeStrides.y + coords.z*shapeStrides.z + coords.w*shapeStrides.w + coords.u*shapeStrides.u + coords.v*shapeStrides.v;\n }\n\n fn idiv(a: i32, b: i32, sign: f32) -> i32 {\n var res: i32 = a / b;\n let mod: i32 = a % b;\n if (sign < 0. && mod != 0) {\n res = res - 1;\n }\n return res;\n }\n\n // NaN defination in IEEE 754-1985 is :\n // - sign = either 0 or 1.\n // - biased exponent = all 1 bits.\n // - fraction = anything except all 0 bits (since all 0 bits represents infinity).\n // https://en.wikipedia.org/wiki/IEEE_754-1985#Representation_of_non-numbers\n fn isnan(val: f32) -> bool {\n let floatToUint: u32 = bitcast(val);\n return (floatToUint & 0x7fffffffu) > 0x7f800000u;\n }\n fn isnanVec4(val : vec4) -> vec4 {\n return vec4(isnan(val[0]), isnan(val[1]), isnan(val[2]), isnan(val[3]));\n }\n`;\n\nfunction getOutputIndexFromCoordsSnippet(outRank: number) {\n let snippet = '';\n switch (outRank) {\n case 0:\n case 1:\n snippet += `\n fn getOutputIndexFromCoords(coords : i32) -> i32 {\n return coords;\n }\n `;\n break;\n case 2:\n snippet += `\n fn getOutputIndexFromCoords(coords : vec2) -> i32 {\n return dot(coords, vec2(uniforms.outShapeStrides, 1));\n }\n `;\n break;\n case 3:\n snippet += `\n fn getOutputIndexFromCoords(coords : vec3) -> i32 {\n return dot(coords, vec3(uniforms.outShapeStrides.x, uniforms.outShapeStrides.y, 1));\n }\n `;\n break;\n case 4:\n snippet += `\n fn getOutputIndexFromCoords(coords : vec4) -> i32 {\n return dot(coords, vec4(\n uniforms.outShapeStrides.x, uniforms.outShapeStrides.y, uniforms.outShapeStrides.z, 1));\n }\n `;\n break;\n case 5:\n snippet += `\n fn getOutputIndexFromCoords(coords : vec5) -> i32 {\n return coords.x * uniforms.outShapeStrides.x +\n coords.y * uniforms.outShapeStrides.y +\n coords.z * uniforms.outShapeStrides.z +\n coords.w * uniforms.outShapeStrides.w +\n coords.u;\n }\n `;\n break;\n case 6:\n snippet += `\n fn getOutputIndexFromCoords(coords : vec6) -> i32 {\n return coords.x * uniforms.outShapeStrides.x +\n coords.y * uniforms.outShapeStrides.y +\n coords.z * uniforms.outShapeStrides.z +\n coords.w * uniforms.outShapeStrides.w +\n coords.u * uniforms.outShapeStrides.u +\n coords.v;\n }\n `;\n break;\n default:\n util.assert(false, () => `Unsupported ${outRank}D shape`);\n break;\n }\n return snippet;\n}\n\nfunction setOutputSnippet(\n outShape: number[], outBufferType: DataType, isVec4: boolean): string {\n const outRank = outShape.length;\n const wgslType = mapToWgslTypes(outBufferType, isVec4);\n let snippet;\n if (isVec4) {\n snippet = `fn setOutputAtIndex(flatIndex : i32, value : vec4) {\n result[flatIndex] = ${wgslType}(value);\n }\n fn setOutputAtIndexI32(flatIndex : i32, value : vec4) {\n result[flatIndex] = ${wgslType}(value);\n }`;\n } else {\n snippet = `fn setOutputAtIndex(flatIndex : i32, value : f32) {\n result[flatIndex] = ${wgslType}(value);\n }\n fn setOutputAtIndexI32(flatIndex : i32, value : i32) {\n result[flatIndex] = ${wgslType}(value);\n }`;\n }\n if (outRank >= 2) {\n const dims = ['d0', 'd1', 'd2', 'd3', 'd4', 'd5'].slice(0, outRank);\n const type = getCoordsDataType(outRank);\n\n if (isVec4) {\n snippet += `\n fn 
setOutputAtCoords(${\n dims.map(d => `${d} : i32`).join(', ')}, value : vec4) {\n let flatIndex = getOutputIndexFromCoords(${type}(${dims.join(', ')}));\n setOutputAtIndex(flatIndex / 4, value);\n }\n fn setOutputAtCoordsI32(${\n dims.map(d => `${d} : i32`).join(', ')}, value : vec4) {\n let flatIndex = getOutputIndexFromCoords(${type}(${dims.join(', ')}));\n setOutputAtIndexI32(flatIndex / 4, value);\n }\n `;\n } else {\n snippet += `\n fn setOutputAtCoords(${\n dims.map(d => `${d} : i32`).join(', ')}, value : f32) {\n let flatIndex = getOutputIndexFromCoords(${type}(${dims.join(', ')}));\n setOutputAtIndex(flatIndex, value);\n }\n fn setOutputAtCoordsI32(${\n dims.map(d => `${d} : i32`).join(', ')}, value : i32) {\n let flatIndex = getOutputIndexFromCoords(${type}(${dims.join(', ')}));\n setOutputAtIndexI32(flatIndex, value);\n }\n `;\n }\n }\n\n return snippet;\n}\n\nfunction getInputSnippet(\n inputInfo: InputInfo, outShape: number[], isVec4: boolean,\n isFlatDispatchLayout: boolean): string {\n let res = getInputAtCoordsSnippet(inputInfo, isVec4);\n\n const inShape = inputInfo.shape;\n if (inShape.length <= outShape.length) {\n res += getInputByOutputSnippet(\n inputInfo, outShape, isVec4, isFlatDispatchLayout);\n }\n\n return res;\n}\n\nfunction getInputAtCoordsSnippet(\n inputInfo: InputInfo, isVec4: boolean): string {\n const texName = inputInfo.name;\n const rank = inputInfo.shape.length;\n const type = getCoordsDataType(rank);\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const dims = ['d0', 'd1', 'd2', 'd3', 'd4', 'd5'].slice(0, rank);\n const inputs = dims.map(d => `${d} : i32`).join(', ');\n\n if (rank < 1) {\n if (isVec4) {\n return `\n fn ${funcName}() -> vec4 {\n return vec4(${texName}[0]);\n }\n `;\n }\n\n return `\n fn ${funcName}() ->f32 {\n return f32(${texName}[0]);\n }\n `;\n }\n\n const shapeStr =\n `uniforms.${texName.charAt(0).toLowerCase() + texName.slice(1)}Shape`;\n let rankStr = `${rank}D`;\n if (rank === 0) {\n rankStr = '1D';\n }\n\n if (isVec4) {\n return `\n fn ${funcName}(${inputs}) -> vec4 {\n return vec4(${texName}[getIndexFromCoords${rankStr}(${type}(${\n dims.join(',')}),\n ${shapeStr}) / 4]);\n }\n `;\n }\n\n return `\n fn ${funcName}(${inputs}) -> f32 {\n return f32(${texName}[getIndexFromCoords${rankStr}(${type}(${\n dims.join(',')}),\n ${shapeStr})]);\n }\n `;\n}\n\nexport function getInputByOutputSnippet(\n inputInfo: InputInfo, outShape: number[], isVec4: boolean,\n isFlatDispatchLayout: boolean): string {\n const texName = inputInfo.name;\n const texFuncSnippet = texName.charAt(0).toUpperCase() + texName.slice(1);\n\n const funcName = 'get' + texFuncSnippet + 'ByOutput';\n\n const inRank = inputInfo.shape.length;\n const outRank = outShape.length;\n const type = getCoordsDataType(outRank);\n\n // If the inShape equals the outShape and the dispatch layout is flat, we can\n // directly use |gl_GlobalInvocationID.x| as the index and don't need coords\n // conversion between these two shapes.\n if (util.arraysEqual(inputInfo.shape, outShape) && isFlatDispatchLayout) {\n if (isVec4) {\n return `\n fn ${funcName}Index(globalIndex : i32) -> vec4 {\n return vec4(${texName}[globalIndex]);\n }\n\n fn ${funcName}Coords(coords : ${type}) -> vec4 {\n return vec4(${texName}[${\n outRank > 1 ? 
'getOutputIndexFromCoords(coords)' : 'coords'} / 4]);\n }\n `;\n } else {\n return `\n fn ${funcName}Index(globalIndex : i32) -> f32 {\n return f32(${texName}[globalIndex]);\n }\n\n fn ${funcName}Coords(coords : ${type}) -> f32 {\n return f32(${texName}[${\n outRank > 1 ? 'getOutputIndexFromCoords(coords)' : 'coords'}]);\n }\n `;\n }\n }\n\n const broadcastDims =\n backend_util.getBroadcastDims(inputInfo.shape, outShape);\n const rankDiff = outRank - inRank;\n\n let coordsSnippet = '';\n\n if (inRank === 0) {\n if (isVec4) {\n return `\n fn ${funcName}Index(globalIndex : i32) -> vec4 {\n return get${texFuncSnippet}();\n }\n\n fn ${funcName}Coords(coords : ${type}) -> vec4 {\n return get${texFuncSnippet}();\n }\n `;\n }\n return `\n fn ${funcName}Index(globalIndex : i32) -> f32{\n return get${texFuncSnippet}();\n }\n\n fn ${funcName}Coords(coords : ${type}) -> f32{\n return get${texFuncSnippet}();\n }\n `;\n } else {\n if (outRank < 2 && broadcastDims.length >= 1) {\n coordsSnippet = 'coords = 0;';\n } else {\n coordsSnippet =\n broadcastDims.map(d => `coords.${getCoordsXYZ(d + rankDiff)} = 0;`)\n .join('\\n');\n }\n }\n\n let unpackedCoordsSnippet = '';\n if (outRank < 2 && inRank > 0) {\n unpackedCoordsSnippet = 'coords';\n } else {\n if (outRank > 1) {\n const coordsType = getCoordsDataType(inRank);\n const coordsValues =\n inputInfo.shape.map((s, i) => `coords.${getCoordsXYZ(i + rankDiff)}`)\n .join(', ');\n unpackedCoordsSnippet = `${coordsType}(${coordsValues})`;\n } else {\n unpackedCoordsSnippet = 'coords';\n }\n }\n\n const shapeStr =\n `uniforms.${texName.charAt(0).toLowerCase() + texName.slice(1)}Shape`;\n const rankStr = `${inRank}D`;\n if (isVec4) {\n return `\n fn ${funcName}Index(globalIndex : i32) -> vec4 {\n var coords = getCoordsFromIndex(globalIndex);\n ${coordsSnippet}\n return ${texName}[getIndexFromCoords${rankStr}(${\n unpackedCoordsSnippet}, ${shapeStr}) / 4];\n }\n\n fn ${funcName}Coords(coordsIn : ${type}) -> vec4 {\n var coords = coordsIn;\n ${coordsSnippet}\n return ${texName}[getIndexFromCoords${rankStr}(${\n unpackedCoordsSnippet}, ${shapeStr}) / 4];\n }\n `;\n }\n\n return `\n fn ${funcName}Index(globalIndex : i32) -> f32 {\n var coords = getCoordsFromIndex(globalIndex);\n ${coordsSnippet}\n return f32(${texName}[getIndexFromCoords${rankStr}(${\n unpackedCoordsSnippet}, ${shapeStr})]);\n }\n\n fn ${funcName}Coords(coordsIn : ${type}) -> f32 {\n var coords = coordsIn;\n ${coordsSnippet}\n return f32(${texName}[getIndexFromCoords${rankStr}(${\n unpackedCoordsSnippet}, ${shapeStr})]);\n }\n `;\n}\n\n/**\n * Generates getOutputCoords() function that computes output coordinates from\n * dispatch geometry to reduce arithmetic.\n */\nexport function getOutputCoordsSnippet(\n outShape: number[],\n dispatchLayout: {x: number[], y?: number[], z?: number[]}):\n [string, number] {\n const {x, y = [], z = []} = dispatchLayout;\n\n const outRank = outShape.length;\n if (x.length === outRank) {\n const dtype = getCoordsDataType(outRank);\n const snippet = `fn getOutputCoords() -> ${dtype}{\n let globalIndex = getGlobalIndex();\n return getCoordsFromIndex(globalIndex);\n }\n `;\n return [snippet, outRank];\n }\n\n let gatherDimensionsStr = '';\n const dims = [x, y, z];\n\n let rank = 0;\n\n for (let i = 0; i < dims.length; i++) {\n const arr = dims[i];\n\n if (arr.length === 0) {\n continue;\n }\n\n rank += arr.length;\n\n if (arr.length === 1) {\n gatherDimensionsStr += `let d${arr[0]} = i32(globalId[${i}]);`;\n } else {\n const strides = symbolicallyComputeStrides(arr, 
'uniforms.outShape');\n gatherDimensionsStr += `var index${i} = i32(globalId[${i}]);`;\n for (let j = 0; j < strides.length; j++) {\n gatherDimensionsStr += `let d${arr[j]} = index${i} / ${strides[j]};`;\n\n if (j === strides.length - 1) {\n gatherDimensionsStr += `let d${arr[j + 1]} = ` +\n `index${i} - d${arr[j]} * ${strides[j]};`;\n } else {\n gatherDimensionsStr +=\n `index${i} = index${i} - d${arr[j]} * ${strides[j]};`;\n }\n }\n }\n }\n\n const dimensions = [];\n for (let i = 0; i < rank; i++) {\n dimensions.push(`d${i}`);\n }\n\n const dtype = getCoordsDataType(rank);\n let snippet = `fn getOutputCoords() -> ${dtype} {\n ${gatherDimensionsStr}\n `;\n if (dimensions.length === 0) {\n snippet += `return ${dtype}(0); }`;\n } else {\n snippet += `return ${dtype}(${dimensions.join(',')}); }`;\n }\n\n return [snippet, rank];\n}\n\n/**\n * Derives logical coordinates from a flat index. Performs integer division\n * with each stride and decrements the index until the index equals the final\n * dimension coordinate.\n */\nfunction getCoordsFromIndexSnippet(shape: number[]): string {\n const rank = shape.length;\n\n if (rank <= 1) {\n return `fn getCoordsFromIndex(index : i32) -> i32 { return index; }`;\n }\n\n const strides = util.computeStrides(shape);\n const dtype = getCoordsDataType(rank);\n\n const coords: string[] = [];\n for (let i = 0; i < rank; i++) {\n coords.push(`d${i}`);\n }\n\n if (strides.length === 1) {\n return ` fn getCoordsFromIndex(index : i32) -> vec2 {\n let d0 = index / uniforms.outShapeStrides; let d1 = index - d0 * uniforms.outShapeStrides;\n return vec2(d0, d1);\n }`;\n }\n let snippet;\n snippet = 'var index2 = index;' +\n strides\n .map((_, i) => {\n const line1 =\n `let ${coords[i]} = index2 / uniforms.outShapeStrides.${\n getCoordsXYZ(i)}`;\n const line2 = i === strides.length - 1 ?\n `let ${coords[i + 1]} = index2 - ${\n coords[i]} * uniforms.outShapeStrides.${getCoordsXYZ(i)}` :\n `index2 = index2 - ${coords[i]} * uniforms.outShapeStrides.${\n getCoordsXYZ(i)}`;\n return `${line1}; ${line2};`;\n })\n .join('');\n\n return `\n fn getCoordsFromIndex(index : i32) -> ${dtype} {\n ${snippet}\n return ${dtype}(${coords.join(',')});\n }\n `;\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {DataType} from '@tensorflow/tfjs-core';\n\nconst arrayProduct = (arr: number[]) => {\n let product = 1;\n for (let i = 0; i < arr.length; i++) {\n product *= arr[i];\n }\n return product;\n};\n\nexport function tilesFitEvenlyIntoShape(\n tileSize: number[], shape: number[]): boolean {\n if (tileSize.length !== shape.length) {\n throw new Error(\n `Cannot compute whether rank ${tileSize.length}` +\n ` tiles fit evenly into rank ${shape.length} shape` +\n ` - ranks must match.`);\n }\n return shape.every(\n (dim: number, dimIdx: number) => dim % tileSize[dimIdx] === 0);\n}\n\n// Computes dispatch geometry based on layout of output dimensions and\n// workGroupSize.\nexport function computeDispatch(\n layout: {x: number[], y?: number[], z?: number[]}, outputShape: number[],\n workGroupSize: [number, number, number] = [1, 1, 1],\n elementsPerThread: [number, number, number] =\n [1, 1, 1]): [number, number, number] {\n const [dispatchX, dispatchY, dispatchZ] = [\n Math.ceil(\n arrayProduct(layout.x.map(d => outputShape[d])) /\n (workGroupSize[0] * elementsPerThread[0])),\n layout.y ? Math.ceil(\n arrayProduct(layout.y.map(d => outputShape[d])) /\n (workGroupSize[1] * elementsPerThread[1])) :\n 1,\n layout.z ? Math.ceil(\n arrayProduct(layout.z.map(d => outputShape[d])) /\n (workGroupSize[2] * elementsPerThread[2])) :\n 1\n ];\n return [dispatchX, dispatchY, dispatchZ];\n}\n\nexport function computeWorkGroupSizeForConv2d(\n layout: {x: number[], y?: number[], z?: number[]},\n outputShape: number[]): [number, number, number] {\n const dim0 = arrayProduct(layout.x.map(d => outputShape[d]));\n const dim1 = arrayProduct(layout.y.map(d => outputShape[d]));\n // TODO(jiajia.qin@intel.com): More fine tune based on outputShape.\n // These are experimental values. Usually, we need to adjust the work group\n // size based on the output shape. For example, when one dimension is smaller\n // than 4, it will be wasteful if we assign a larger size for this dimension,\n // which results lots of threads doing useless work and reduces parallelism\n // of hardware threads. But it is always a balance between work group size\n // and shared memory. If one dimension is too small, such as 1, shared memory\n // will won't be fully utilized.\n if (dim0 <= 4) {\n return [4, 16, 1];\n }\n if (dim1 <= 4) {\n return [16, 4, 1];\n }\n\n return [16, 16, 1];\n}\n\nexport function computeWorkGroupSizeForMatMul(\n dimAOuter: number, dimInner: number,\n dimBOuter: number): [number, number, number] {\n // These are experimental values. Usually, we need to adjust the work group\n // size based on the input shapes to improve the EU occupancy.\n // TODO: WebGPU limits the maximum allowed shared memory size as 16K. To make\n // sure it doesn't exceed this limitations. 
Temporarily reduce the work group\n // size to [8, 8, 1] and the work per thread size is [4, 4, 1]. But we should\n // revisit it and find the balance between work group size and work per thread\n // size.\n if (dimAOuter === 1) {\n return [32, 1, 1];\n } else if (dimBOuter === 1) {\n return [1, 32, 1];\n }\n\n return [8, 8, 1];\n}\n\nexport function computeWorkPerThreadForConv2d(\n layout: {x: number[], y?: number[], z?: number[]},\n outputShape: number[]): [number, number, number] {\n const dim0 = arrayProduct(layout.x.map(d => outputShape[d]));\n const dim1 = arrayProduct(layout.y.map(d => outputShape[d]));\n // TODO(jiajia.qin@intel.com): More fine tune based on outputShape.\n // The following conditions correspond to the values set in\n // computeWorkGroupSizeForConv2d.\n if (dim0 <= 4) {\n return [1, 2, 1];\n }\n if (dim1 <= 4) {\n return [2, 1, 1];\n }\n\n return [2, 2, 1];\n}\n\nexport function flatDispatchLayout(shape: number[]) {\n return {x: shape.map((d, i) => i)};\n}\n\nexport function GPUBytesPerElement(dtype: DataType): number {\n if (dtype === 'float32' || dtype === 'int32' || dtype === 'bool' ||\n dtype === 'string') {\n return 4;\n } else if (dtype === 'complex64') {\n return 8;\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n\nexport function ArrayBufferToTypedArray(data: ArrayBuffer, dtype: DataType) {\n if (dtype === 'float32') {\n return new Float32Array(data);\n } else if (dtype === 'int32') {\n return new Int32Array(data);\n } else if (dtype === 'bool' || dtype === 'string') {\n return Uint8Array.from(new Int32Array(data));\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n\nexport function isWebGPUSupported(): boolean {\n return ((typeof window !== 'undefined') ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined')) && !!navigator.gpu;\n}\n\nexport interface WebGPULayout {\n bindGroupLayout: GPUBindGroupLayout;\n pipelineLayout: GPUPipelineLayout;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getMainHeaderString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, tilesFitEvenlyIntoShape} from './webgpu_util';\n\nexport function makeMatMulPackedVec4Source(\n workPerThread: number[], tileAOuter: number, tileBOuter: number,\n tileInner: number): string {\n util.assert(\n tileInner % 4 === 0 && workPerThread[0] === 4,\n () => 'tileInner must be divisible by 4. 
And ColPerThread must be 4');\n return `\n var mm_Asub : array, ${\n tileInner / workPerThread[0]}>, ${tileAOuter}>;\n var mm_Bsub : array, ${\n tileBOuter / workPerThread[0]}>, ${tileInner}>;\n\n let RowPerThread = ${workPerThread[1]};\n let ColPerThread = ${workPerThread[0]};\n let TileInner = ${tileInner};\n\n ${getMainHeaderString()}\n\n let tileRow = ${tileAOuter === 1 ? '0' : 'i32(localId.y) * RowPerThread'};\n let tileCol = i32(localId.x);\n\n let globalRow = ${\n tileAOuter === 1 ? '0' : 'i32(globalId.y) * RowPerThread'};\n let globalCol = i32(globalId.x);\n let numTiles = (uniforms.dimInner - 1) / TileInner + 1;\n\n var acc: array, RowPerThread>;\n var ACached : vec4;\n var BCached : array, 4>;\n\n // Loop over shared dimension.\n var globalColA = tileCol;\n let RowPerThreadB = TileInner / i32(workGroupSizeY);\n let tileRowB = i32(localId.y) * RowPerThreadB;\n for (var t = 0; t < numTiles; t = t + 1) {\n // Load one tile of A into local memory.\n for (var innerRow = 0; innerRow < RowPerThread; innerRow = innerRow + 1) {\n let inputRow = tileRow + innerRow;\n let inputCol = tileCol;\n mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, globalColA, globalId);\n }\n globalColA = globalColA + TileInner / ColPerThread;\n\n // Load one tile of B into local memory.\n for (var innerRow = 0; innerRow < RowPerThreadB; innerRow = innerRow + 1) {\n let inputRow = tileRowB + innerRow;\n let inputCol = tileCol;\n mm_Bsub[inputRow][inputCol] = mm_readB(t * TileInner + inputRow, globalCol, globalId);\n }\n\n workgroupBarrier();\n\n // Compute acc values for a single thread.\n for (var k = 0; k < TileInner / ColPerThread; k = k + 1) {\n BCached[0] = mm_Bsub[k * ColPerThread][tileCol];\n BCached[1] = mm_Bsub[k * ColPerThread + 1][tileCol];\n BCached[2] = mm_Bsub[k * ColPerThread + 2][tileCol];\n BCached[3] = mm_Bsub[k * ColPerThread + 3][tileCol];\n\n for (var i = 0; i < RowPerThread; i = i + 1) {\n ACached = mm_Asub[tileRow + i][k];\n acc[i] = BCached[0] * ACached.x + acc[i];\n acc[i] = BCached[1] * ACached.y + acc[i];\n acc[i] = BCached[2] * ACached.z + acc[i];\n acc[i] = BCached[3] * ACached.w + acc[i];\n }\n }\n\n workgroupBarrier();\n }\n\n for (var innerRow = 0; innerRow < RowPerThread; innerRow = innerRow + 1) {\n mm_write(globalRow + innerRow,\n globalCol,\n acc[innerRow], globalId);\n }\n }`;\n}\n\nexport class MatMulPackedVec4Program implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['A', 'B'];\n uniforms = `dimAOuter : i32, dimBOuter : i32, dimInner : i32,`;\n workGroupSize: [number, number, number] = [8, 8, 1];\n elementsPerThread: [number, number, number];\n isVec4 = true;\n aShape: [number, number, number];\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n tileAOuter: number;\n tileBOuter: number;\n tileInner: number;\n fitA: boolean;\n fitB: boolean;\n batchAEqualOne: boolean;\n batchBEqualOne: boolean;\n\n constructor(\n aShape: [number, number, number], outputShape: [number, number, number],\n rowPerThread: number, batchAEqualOne: boolean, batchBEqualOne: boolean,\n bias: TensorInfo = null, activation: backend_util.Activation = null,\n preluActivationWeights: TensorInfo = null) {\n this.outputShape = outputShape;\n this.dispatchLayout = {x: [2], y: [1], z: [0]};\n // The first element in elementsPerThread must be 4.\n if (outputShape[1] === 1) {\n this.elementsPerThread = [4, 1, 1];\n } 
else {\n this.elementsPerThread = [4, 4, 1];\n }\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n this.elementsPerThread);\n\n const addBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n this.tileAOuter = outputShape[1] === 1 ?\n 1 :\n this.workGroupSize[1] * this.elementsPerThread[1];\n this.tileBOuter = this.workGroupSize[0] * this.elementsPerThread[0];\n this.tileInner = this.tileBOuter;\n\n this.aShape = aShape;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n this.batchAEqualOne = batchAEqualOne;\n this.batchBEqualOne = batchBEqualOne;\n\n [this.fitA, this.fitB] = this.getShapeFit();\n\n this.shaderKey = `matMulPackedVec4_${this.activation}_${this.fitA}_${\n this.fitB}_${this.elementsPerThread}_${this.batchAEqualOne}_${\n this.batchBEqualOne}`;\n }\n\n getShapeFit(): boolean[] {\n const dimInner = this.aShape[2];\n const dimBOuter = this.outputShape[2];\n const bShape = [this.outputShape[0], dimInner, dimBOuter];\n\n const tileSizeA = [this.tileAOuter, this.tileInner];\n const tileSizeB = [this.tileInner, this.tileBOuter];\n return [\n tilesFitEvenlyIntoShape(tileSizeA, this.aShape.slice(1)),\n tilesFitEvenlyIntoShape(tileSizeB, bShape.slice(1))\n ];\n }\n\n getUserCode(): string {\n const sampleA = this.fitA ?\n `return A[batch * batchASize + row * uniforms.dimInner / 4 + col]` :\n `if (coordsInBounds2D(vec2(row, col * 4), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch * batchASize + row * uniforms.dimInner / 4 + col];\n }\n return vec4(0.0)`;\n\n const sampleB = this.fitB ?\n `return B[batch * batchBSize + row * uniforms.dimBOuter / 4 + col]` :\n `if(coordsInBounds2D(vec2(row, col * 4), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + row * uniforms.dimBOuter / 4 + col];\n }\n return vec4(0.0)`;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp =\n mapActivationToShaderProgram(this.activation, this.isVec4);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a : vec4, outCoord : vec3) -> vec4 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : vec4, outCoord : vec3) -> vec4 {\n ${activationOp}\n }`;\n }\n\n applyActivationSnippet = 'value = activation(value, outCoord);';\n }\n const addBiasSnippet =\n this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> vec4 {\n ${\n this.batchAEqualOne ? `\n let batchASize = 0;\n let batch = 0;\n ` :\n `\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2] / 4;\n let batch = i32(globalId.z);\n `}\n\n ${sampleA};\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> vec4 {\n ${\n this.batchBEqualOne ? 
`\n let batchBSize = 0;\n let batch = 0;\n ` :\n `\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2] / 4;\n let batch = i32(globalId.z);\n `}\n ${sampleB};\n }\n\n fn mm_write(row : i32, col : i32, valueIn : vec4, globalId : vec3) {\n if (row < uniforms.aShape[1] && col * 4 < uniforms.bShape[2])\n {\n var value = valueIn;\n let batch = i32(globalId.z);\n let outCoord = vec3(batch, row, col * 4);\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(outCoord[0], outCoord[1], outCoord[2], value);\n }\n }\n ${\n makeMatMulPackedVec4Source(\n this.elementsPerThread, this.tileAOuter, this.tileBOuter,\n this.tileInner)}\n `;\n\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getMainHeaderString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, computeWorkGroupSizeForMatMul, tilesFitEvenlyIntoShape} from './webgpu_util';\n\nexport function makeMatMulPackedSource(\n workPerThread: number[], workGroupSize: [number, number, number]): string {\n const tileAOuter = workGroupSize[1] * workPerThread[1];\n const tileBOuter = workGroupSize[0] * workPerThread[0];\n const tileInner = tileAOuter > tileBOuter ? 
tileAOuter : tileBOuter;\n return `\n var mm_Asub : array, ${tileAOuter}>;\n var mm_Bsub : array, ${tileInner}>;\n ${getMainHeaderString()}\n let tileRow = i32(localId.y) * ${workPerThread[1]};\n let tileCol = i32(localId.x) * ${workPerThread[0]};\n\n let globalRow = i32(globalId.y) * ${workPerThread[1]};\n let globalCol = i32(globalId.x) * ${workPerThread[0]};\n\n let numTiles = (uniforms.dimInner - 1) / ${tileInner} + 1;\n\n var acc : array, ${workPerThread[1]}>;\n var ACached : f32;\n var BCached : array;\n\n // Without this initialization strange values show up in acc.\n for (var innerRow = 0; innerRow < ${\n workPerThread[1]}; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < ${\n workPerThread[0]}; innerCol = innerCol + 1) {\n acc[innerRow][innerCol] = 0.0;\n }\n }\n\n let ColPerThreadA = ${tileInner} / ${workGroupSize[0]};\n let tileColA = i32(localId.x) * ColPerThreadA;\n let RowPerThreadB = ${tileInner} / ${workGroupSize[1]};\n let tileRowB = i32(localId.y) * RowPerThreadB;\n\n // Loop over shared dimension.\n for (var t = 0; t < numTiles; t = t + 1) {\n // Load one tile of A into local memory.\n for (var innerRow = 0; innerRow < ${\n workPerThread[1]}; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < ColPerThreadA; innerCol = innerCol + 1) {\n let inputRow = tileRow + innerRow;\n let inputCol = tileColA + innerCol;\n\n mm_Asub[inputRow][inputCol] = mm_readA(\n globalRow + innerRow,\n t * ${tileInner} + inputCol, globalId);\n }\n }\n // Load one tile of B into local memory.\n for (var innerRow = 0; innerRow < RowPerThreadB; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < ${\n workPerThread[0]}; innerCol = innerCol + 1) {\n let inputRow = tileRowB + innerRow;\n let inputCol = tileCol + innerCol;\n\n mm_Bsub[inputRow][inputCol] = mm_readB(\n t * ${tileInner} + inputRow,\n globalCol + innerCol, globalId);\n }\n }\n\n workgroupBarrier();\n\n // Compute acc values for a single thread.\n for (var k = 0; k < ${tileInner}; k = k + 1) {\n for (var inner = 0; inner < ${workPerThread[0]}; inner = inner + 1) {\n BCached[inner] = mm_Bsub[k][tileCol + inner];\n }\n\n for (var innerRow = 0; innerRow < ${\n workPerThread[1]}; innerRow = innerRow + 1) {\n ACached = mm_Asub[tileRow + innerRow][k];\n for (var innerCol = 0; innerCol < ${\n workPerThread[0]}; innerCol = innerCol + 1) {\n acc[innerRow][innerCol] = acc[innerRow][innerCol] + ACached * BCached[innerCol];\n }\n }\n }\n\n workgroupBarrier();\n }\n\n for (var innerRow = 0; innerRow < ${\n workPerThread[1]}; innerRow = innerRow + 1) {\n for (var innerCol = 0; innerCol < ${\n workPerThread[0]}; innerCol = innerCol + 1) {\n\n if ((globalCol + innerCol) < uniforms.dimBOuter &&\n (globalRow + innerRow) < uniforms.dimAOuter) {\n mm_write(globalRow + innerRow,\n globalCol + innerCol,\n acc[innerRow][innerCol], globalId);\n }\n }\n }\n }\n `;\n}\n\nexport function makeMatMulVectorSource(workGroupSize: [number, number, number]):\n string {\n return `\n let TileSize = ${workGroupSize[0] * 4};\n var mm_Asub : array, ${workGroupSize[0]}>;\n\n ${getMainHeaderString()}\n let tileCol = i32(localId.x);\n let globalCol = i32(globalId.x);\n let globalRow = i32(globalId.y);\n\n let numTiles = (uniforms.dimInner - 1) / TileSize + 1;\n\n // Without this initialization strange values show up in acc.\n var acc = 0.0;\n\n // Loop over shared dimension.\n for (var t = 0; t < numTiles; t = t + 1) {\n // Load one tile of A into local memory.\n let colA = t * TileSize + tileCol * 4;\n mm_Asub[tileCol] = vec4(mm_readA(globalRow, 
colA, globalId),\n mm_readA(globalRow, colA + 1, globalId),\n mm_readA(globalRow, colA + 2, globalId),\n mm_readA(globalRow, colA + 3, globalId));\n workgroupBarrier();\n\n // Compute acc values for a single thread.\n for (var k = 0; k < TileSize / 4; k = k + 1) {\n let rowB = t * TileSize + k * 4;\n let BCached = vec4(mm_readB(rowB, globalCol, globalId),\n mm_readB(rowB + 1, globalCol, globalId),\n mm_readB(rowB + 2, globalCol, globalId),\n mm_readB(rowB + 3, globalCol, globalId));\n\n let ACached = mm_Asub[k];\n acc = acc + dot(ACached, BCached);\n }\n\n workgroupBarrier();\n }\n\n if (globalRow < uniforms.dimAOuter && globalCol < uniforms.dimBOuter) {\n mm_write(globalRow, globalCol, acc, globalId);\n }\n }\n `;\n}\n\nexport class MatMulPackedProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n workPerThread: number;\n variableNames = ['A', 'B'];\n uniforms = `dimAOuter : i32, dimBOuter : i32, dimInner : i32,`;\n workGroupSize: [number, number, number] = [16, 16, 1];\n aShape: [number, number, number];\n transposeA: boolean;\n transposeB: boolean;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n fitA: boolean;\n fitB: boolean;\n batchAEqualOne: boolean;\n batchBEqualOne: boolean;\n\n constructor(\n aShape: [number, number, number], outputShape: [number, number, number],\n workPerThread: number, batchAEqualOne: boolean, batchBEqualOne: boolean,\n transposeA = false, transposeB = false, bias: TensorInfo = null,\n activation: backend_util.Activation = null,\n preluActivationWeights: TensorInfo = null) {\n this.outputShape = outputShape;\n this.dispatchLayout = {x: [2], y: [1], z: [0]};\n const dimInner = transposeA ? 
aShape[1] : aShape[2];\n this.workGroupSize =\n computeWorkGroupSizeForMatMul(outputShape[1], dimInner, outputShape[2]);\n if (outputShape[1] === 1 || outputShape[2] === 1) {\n workPerThread = 1;\n }\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [workPerThread, workPerThread, 1]);\n // If dispaching number is one, it means only one work group is running.\n // For modern GPUs, it supports multiple work groups running in parallel.\n // So there may be some idle hardware threads.\n // In this case, we prefer to reduce the work per thread and improve the\n // thread utilization\n if (util.arraysEqual(this.dispatch, [1, 1, 1])) {\n workPerThread = 1;\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [workPerThread, workPerThread, 1]);\n }\n const addBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n this.workPerThread = workPerThread;\n this.aShape = aShape;\n this.transposeA = transposeA;\n this.transposeB = transposeB;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n this.batchAEqualOne = batchAEqualOne;\n this.batchBEqualOne = batchBEqualOne;\n\n const dimBOuter = this.outputShape[2];\n const bShape = this.transposeB ?\n [this.outputShape[0], dimBOuter, dimInner] :\n [this.outputShape[0], dimInner, dimBOuter];\n\n [this.fitA, this.fitB] = this.getShapeFit(bShape);\n this.shaderKey = `matMulPacked_${this.workPerThread}_${transposeA}_${\n transposeB}_${this.activation}_${this.fitA}_${this.fitB}_${\n this.outputShape[1] > 1}_${this.batchAEqualOne}_${this.batchBEqualOne}`;\n }\n\n getShapeFit(bShape: number[]): boolean[] {\n const tileAOuter = this.workGroupSize[1] * this.workPerThread;\n const tileBOuter = this.workGroupSize[0] * this.workPerThread;\n let tileInner = tileAOuter > tileBOuter ? 
tileAOuter : tileBOuter;\n if (this.outputShape[1] === 1) {\n tileInner *= 4;\n }\n util.assert(\n tileInner % this.workGroupSize[0] === 0 &&\n tileInner % this.workGroupSize[1] === 0,\n () => `tileInner must be multiple of workgroupsize.x ` +\n `and workgroupsize.y`);\n const tileSizeA = [tileAOuter, tileInner];\n const tileSizeB = [tileInner, tileBOuter];\n\n return [\n tilesFitEvenlyIntoShape(tileSizeA, this.aShape.slice(1)),\n tilesFitEvenlyIntoShape(tileSizeB, bShape.slice(1))\n ];\n }\n\n getUserCode(): string {\n let sampleA;\n\n if (this.transposeA === false) {\n sampleA = this.fitA ?\n `return A[batch * batchASize + row * uniforms.dimInner + col];` :\n `if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch * batchASize + row * uniforms.dimInner + col];\n }\n return 0.0;`;\n } else {\n sampleA = this.fitA ?\n `return A[batch * batchASize + col * uniforms.dimAOuter + row];` :\n `if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch* batchASize + col * uniforms.dimAOuter + row];\n }\n return 0.0;`;\n }\n\n let sampleB;\n if (this.transposeB === false) {\n sampleB = this.fitB ?\n `return B[batch * batchBSize + row * uniforms.dimBOuter + col];` :\n `if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + row * uniforms.dimBOuter + col];\n }\n return 0.0;`;\n } else {\n sampleB = this.fitB ?\n `return B[batch * batchBSize + col * uniforms.dimInner + row];` :\n `if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + col * uniforms.dimInner + row];\n }\n return 0.0;`;\n }\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp = mapActivationToShaderProgram(this.activation, false);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a : f32, outCoord : vec3) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : f32, outCoord : vec3) -> f32 {\n ${activationOp}\n }\n `;\n }\n\n applyActivationSnippet = 'value = activation(value, outCoord);';\n }\n\n const addBiasSnippet =\n this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n ${\n this.batchAEqualOne ? `\n let batch = 0;\n let batchASize = 0;\n ` :\n `\n let batch = i32(globalId.z);\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2];\n `}\n ${sampleA}\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n ${\n this.batchBEqualOne ? `\n let batch = 0;\n let batchBSize = 0;\n ` :\n `\n let batch = i32(globalId.z);\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2];\n `}\n ${sampleB}\n }\n\n fn mm_write(row : i32, col : i32, valueIn : f32, globalId : vec3) {\n var value = valueIn;\n let batch = i32(globalId.z);\n let outCoord = vec3(batch, row, col);\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(batch, row, col, value);\n }\n ${\n this.outputShape[1] > 1 ?\n makeMatMulPackedSource(\n [this.workPerThread, this.workPerThread, 1],\n this.workGroupSize) :\n makeMatMulVectorSource(this.workGroupSize)}\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getMainHeaderString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch} from './webgpu_util';\n\nexport function makeMatMulReduceSource(): string {\n return `\n var sumValues : array;\n ${getMainHeaderString()}\n let coords = getOutputCoords();\n let batch = coords[0];\n let row = coords[1];\n let col = coords[2];\n var sum = 0.0;\n let Length = uniforms.dimInner;\n for (var k = i32(localId.x); k < Length; k = k + i32(workGroupSizeX)) {\n let dataA = mm_readA(batch, row, k);\n let dataB = mm_readB(batch, k, col);\n sum = sum + dataA * dataB;\n }\n sumValues[localId.x] = sum;\n workgroupBarrier();\n\n for(var currentSize = workGroupSizeX / 2u; currentSize > 1u;\n currentSize = currentSize / 2u) {\n if (localId.x < currentSize)\n {\n sumValues[localId.x] = sumValues[localId.x] + sumValues[localId.x + currentSize];\n }\n workgroupBarrier();\n }\n\n if (localId.x == 0u) {\n sum = sumValues[0] + sumValues[1];\n mm_write(batch, row, col, sum);\n }\n }\n `;\n}\n\nexport class MatMulReduceProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['A', 'B'];\n uniforms = `dimAOuter : i32, dimBOuter : i32, dimInner : i32,`;\n workGroupSize: [number, number, number] = [256, 1, 1];\n transposeA: boolean;\n transposeB: boolean;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n batchAEqualOne: boolean;\n batchBEqualOne: boolean;\n\n constructor(\n outputShape: [number, number, number], batchAEqualOne: boolean,\n batchBEqualOne: boolean, transposeA = false, transposeB = false,\n bias: TensorInfo = null, activation: backend_util.Activation = null,\n preluActivationWeights: TensorInfo = null) {\n this.outputShape = outputShape;\n this.dispatchLayout = {x: [], y: [1, 2], z: [0]};\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n const addBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n this.transposeA = transposeA;\n this.transposeB = transposeB;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n this.batchAEqualOne = batchAEqualOne;\n this.batchBEqualOne = batchBEqualOne;\n this.shaderKey = `matMulReduce_${this.activation}_${transposeA}_${\n transposeB}_${this.batchAEqualOne}_${this.batchBEqualOne}`;\n }\n\n getUserCode(): string {\n let sampleA;\n if 
(this.transposeA === false) {\n sampleA =\n `return f32(A[batch * batchASize + row * uniforms.dimInner + col]);`;\n } else {\n sampleA =\n `return f32(A[batch * batchASize + col * uniforms.dimAOuter + row]);`;\n }\n\n let sampleB;\n if (this.transposeB === false) {\n sampleB =\n `return f32(B[batch * batchBSize + row * uniforms.dimBOuter + col]);`;\n } else {\n sampleB =\n `return f32(B[batch * batchBSize + col * uniforms.dimInner + row]);`;\n }\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp = mapActivationToShaderProgram(this.activation, false);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a : f32, outCoord : vec3) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : f32, outCoord : vec3) -> f32 {\n ${activationOp}\n }\n `;\n }\n\n applyActivationSnippet = 'value = activation(value, outCoord);';\n }\n\n const addBiasSnippet =\n this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n\n fn mm_readA(batchIn: i32, row : i32, col : i32) -> f32 {\n ${\n this.batchAEqualOne ? `\n let batchASize = 0;\n let batch = 0;\n ` :\n `\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2];\n let batch = batchIn;\n `}\n ${sampleA}\n }\n\n fn mm_readB(batchIn: i32, row : i32, col : i32) -> f32 {\n ${\n this.batchBEqualOne ? `\n let batch = 0;\n let batchBSize = 0;\n ` :\n `\n let batch = batchIn;\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2];\n `}\n ${sampleB}\n }\n\n fn mm_write(batch: i32, row : i32, col : i32, valueIn : f32) {\n var value = valueIn;\n let outCoord = vec3(batch, row, col);\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(batch, row, col, value);\n }\n ${makeMatMulReduceSource()}\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getMainHeaderString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\n\nexport function makeMatMulSmallOutputSizeSource(\n workGroupSize: [number, number, number]): string {\n const tileAOuter = workGroupSize[1] / 2;\n const tileBOuter = workGroupSize[0];\n const tileInner = tileAOuter > tileBOuter ? 
tileAOuter : tileBOuter;\n return `\n var mm_Asub1 : array, ${tileAOuter}>;\n var mm_Bsub1 : array, ${tileInner}>;\n var mm_Asub2 : array, ${tileAOuter}>;\n var mm_Bsub2 : array, ${tileInner}>;\n\n // If the output size is small for matrix multiplication, avoid to use vec4\n // and handle some elements per thread to optimally utilize the ALU.\n // Introduces two shared memory buffers, some logical threads could handle\n // arithmetic operations and others handle IO operations between barrier api,\n // makes ALUs and load/store units work simultaneously, could improves\n // the performance.\n ${getMainHeaderString()}\n let tileRow = i32(localId.y);\n let tileCol = i32(localId.x);\n let globalRow = i32(globalId.y);\n let globalCol = i32(globalId.x);\n\n // uniforms.dimInner should be greater than 0.\n let numTiles = (uniforms.dimInner - 1) / ${tileInner} + 1;\n var acc = 0.0;\n\n var globalColA = tileCol;\n var globalRowB = tileRow;\n for (var t = 0; t < numTiles; t = t + 1) {\n if (t == 0) {\n if (tileRow < ${tileAOuter}) {\n // Load one tile of A and B into local memory.\n // globalRow is always greater than or equal tileRow.\n mm_Asub1[tileRow][tileCol] =\n mm_readA((globalRow - tileRow) / 2 + tileRow, globalColA, globalId);\n globalColA = globalColA + ${tileInner};\n mm_Bsub1[tileRow][tileCol] = mm_readB(globalRowB, globalCol, globalId);\n globalRowB = globalRowB + ${tileInner};\n }\n } else {\n if (tileRow < ${tileAOuter}) {\n // Load one tile of A and B into local memory.\n // globalRow is always greater than or equal tileRow.\n mm_Asub1[tileRow][tileCol] =\n mm_readA((globalRow - tileRow) / 2 + tileRow, globalColA, globalId);\n globalColA = globalColA + ${tileInner};\n mm_Bsub1[tileRow][tileCol] = mm_readB(globalRowB, globalCol, globalId);\n globalRowB = globalRowB + ${tileInner};\n } else {\n // Compute acc values for a single thread.\n for (var k = 0; k < ${tileInner}; k = k + 1) {\n let subRow = tileRow - ${tileAOuter};\n if (subRow < 0) {\n continue;\n }\n acc = acc + mm_Asub2[subRow][k] * mm_Bsub2[k][tileCol];\n }\n }\n }\n workgroupBarrier();\n if (t != 0) {\n t = t + 1;\n }\n\n if (t < numTiles) {\n if (tileRow < ${tileAOuter}) {\n // Load one tile of A and B into local memory.\n // globalRow is always greater than or equal tileRow.\n mm_Asub2[tileRow][tileCol] =\n mm_readA((globalRow - tileRow) / 2 + tileRow, globalColA, globalId);\n globalColA = globalColA + ${tileInner};\n mm_Bsub2[tileRow][tileCol] = mm_readB(globalRowB, globalCol, globalId);\n globalRowB = globalRowB + ${tileInner};\n } else {\n // Compute acc values for a single thread.\n for (var k = 0; k < ${tileInner}; k = k + 1) {\n let subRow = tileRow - ${tileAOuter};\n if (subRow < 0) {\n continue;\n }\n acc = acc + mm_Asub1[subRow][k] * mm_Bsub1[k][tileCol];\n }\n }\n }\n workgroupBarrier();\n }\n let writeCol = (globalRow - tileRow) / 2 + tileRow - ${tileAOuter};\n if (tileRow >= ${tileAOuter} && writeCol >= 0) {\n mm_write(writeCol, globalCol, acc, globalId);\n }\n }\n `;\n}\n\nexport class MatMulSmallOutputSizeProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['A', 'B'];\n uniforms = `dimAOuter : i32, dimBOuter : i32, dimInner : i32,`;\n workGroupSize: [number, number, number] = [8, 16, 1];\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n batchAEqualOne: boolean;\n batchBEqualOne: boolean;\n\n constructor(\n aShape: 
[number, number, number], bShape: [number, number, number],\n outputShape: [number, number, number], bias: TensorInfo = null,\n activation: backend_util.Activation = null,\n preluActivationWeights: TensorInfo = null) {\n util.assert(\n aShape[1] <= 16 || bShape[2] <= 16,\n () =>\n 'This program can be only used when A width or B Height are small');\n\n this.outputShape = outputShape;\n\n this.dispatchLayout = {x: [2], y: [1], z: [0]};\n this.dispatch = [\n Math.ceil(outputShape[2] / this.workGroupSize[0]),\n Math.ceil(outputShape[1] * 2 / this.workGroupSize[1]), outputShape[0]\n ];\n\n const addBias = bias != null;\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n const hasPreluActivationWeights = preluActivationWeights != null;\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n this.batchAEqualOne = aShape[0] === 1;\n this.batchBEqualOne = bShape[0] === 1;\n this.shaderKey = `matMulSmallOutputSize_${this.activation}_${\n this.batchAEqualOne}_${this.batchBEqualOne}`;\n }\n\n getUserCode(): string {\n const sampleA =\n `if (coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimInner))) {\n return A[batch * batchASize + row * uniforms.dimInner + col];\n }\n return 0.0;`;\n\n const sampleB =\n `if (coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return B[batch * batchBSize + row * uniforms.dimBOuter + col];\n }\n return 0.0;`;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp = mapActivationToShaderProgram(this.activation, false);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a : f32, outCoord : vec3) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet =\n `fn activation(a : f32, outCoord : vec3) -> f32 {\n ${activationOp}\n }`;\n }\n\n applyActivationSnippet = 'value = activation(value, outCoord);';\n }\n\n const addBiasSnippet =\n this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n ${\n this.batchAEqualOne ? `\n let batch = 0;\n let batchASize = 0;\n ` :\n `\n let batchASize = uniforms.aShape[1] * uniforms.aShape[2];\n let batch = i32(globalId.z);\n `}\n ${sampleA}\n }\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n ${\n this.batchBEqualOne ? `\n let batch = 0;\n let batchBSize = 0;\n ` :\n `\n let batch = i32(globalId.z);\n let batchBSize = uniforms.bShape[1] * uniforms.bShape[2];\n `}\n ${sampleB}\n }\n fn mm_write(row : i32, col : i32, valueIn : f32, globalId : vec3) {\n if (coordsInBounds2D(vec2(row, col), vec2(uniforms.dimAOuter, uniforms.dimBOuter))) {\n let batch = i32(globalId.z);\n let outCoord = vec3(batch, row, col);\n var value = valueIn;\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(batch, row, col, value);\n }\n }\n ${makeMatMulSmallOutputSizeSource(this.workGroupSize)}\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Reshape, ReshapeAttrs, ReshapeInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nexport function reshape(\n args: {inputs: ReshapeInputs, backend: WebGPUBackend, attrs: ReshapeAttrs}):\n TensorInfo {\n const {inputs, attrs} = args;\n const {x} = inputs;\n const {shape} = attrs;\n\n const xSize = util.sizeFromShape(x.shape);\n const $shape = util.inferFromImplicitShape(shape, xSize);\n const $xSize = util.sizeFromShape($shape);\n\n util.assert(\n xSize === $xSize,\n () => `The new shape (${$shape}) has ${$xSize} elements and the old ` +\n `shape (${x.shape}) has ${xSize} elements. The new shape and old ` +\n `shape must have the same number of elements.`);\n\n // Backend needs to track refCount for the dataId for reshape op\n args.backend.incRef(x.dataId);\n return {dataId: x.dataId, shape: $shape, dtype: x.dtype};\n}\n\nexport const reshapeConfig: KernelConfig = {\n kernelName: Reshape,\n backendName: 'webgpu',\n kernelFunc: reshape as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, broadcast_util, env, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {MatMulPackedVec4Program} from '../matmul_packed_vec4_webgpu';\nimport {MatMulPackedProgram} from '../matmul_packed_webgpu';\nimport {MatMulReduceProgram} from '../matmul_reduce';\nimport {MatMulSmallOutputSizeProgram} from '../matmul_small_output_size_webgpu';\nimport {WebGPUProgram} from '../webgpu_program';\n\nimport {reshape} from './Reshape';\n\ntype BatchMatMulConfig = {\n a: TensorInfo,\n b: TensorInfo,\n transposeA: boolean,\n transposeB: boolean,\n backend: WebGPUBackend,\n bias?: TensorInfo,\n preluActivationWeights?: TensorInfo,\n leakyreluAlpha?: number,\n activation?: backend_util.Activation\n};\n\nexport function batchMatMulImpl({\n a,\n b,\n transposeA,\n transposeB,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: BatchMatMulConfig): TensorInfo {\n const aRank = a.shape.length;\n const bRank = b.shape.length;\n\n const innerShapeA = transposeA ? 
a.shape[aRank - 2] : a.shape[aRank - 1];\n const innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2];\n\n const outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2];\n const outerShapeB = transposeB ? b.shape[bRank - 2] : b.shape[bRank - 1];\n\n const outerDimsA = a.shape.slice(0, -2);\n const outerDimsB = b.shape.slice(0, -2);\n\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n\n const outShapeOuterDims = broadcast_util.assertAndGetBroadcastShape(\n a.shape.slice(0, -2), b.shape.slice(0, -2));\n const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);\n\n util.assert(\n innerShapeA === innerShapeB,\n () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${a.shape} and ` +\n `${b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n\n const a3dShape: [number, number, number] = transposeA ?\n [batchDimA, innerShapeA, outerShapeA] :\n [batchDimA, outerShapeA, innerShapeA];\n const b3dShape: [number, number, number] = transposeB ?\n [batchDimB, outerShapeB, innerShapeB] :\n [batchDimB, innerShapeB, outerShapeB];\n\n // The rest of the implementation is designed to operate on rank-3 tensors\n const a3d = reshape({inputs: {x: a}, backend, attrs: {shape: a3dShape}});\n const b3d = reshape({inputs: {x: b}, backend, attrs: {shape: b3dShape}});\n const intermediates: TensorInfo[] = [a3d, b3d];\n\n const batchDim = Math.max(batchDimA, batchDimB);\n const batchAEqualOne = batchDimA === 1;\n const batchBEqualOne = batchDimB === 1;\n const useVec4 = innerShapeA % 4 === 0 && outerShapeB % 4 === 0 &&\n !transposeA && !transposeB;\n let program: WebGPUProgram;\n if (outerShapeA * outerShapeB <= 32) {\n program = new MatMulReduceProgram(\n [batchDim, outerShapeA, outerShapeB], batchAEqualOne, batchBEqualOne,\n transposeA, transposeB, bias, activation, preluActivationWeights);\n\n } else\n // When the output size is absolutely small or relatively small, we may\n // use MatMulSmallOutputSizeProgram to get better performance. Absolutely\n // small size means that the output size is smaller than [16, 512].\n // Relatively small size means that one demension size of the output is\n // smaller than 16, and the output size is also more than or equal two\n // times smaller than each of the two input sizes. For example, if input\n // sizes are [12, 2048] and [2048, 1024], the output size is [12, 1024],\n // which is relatively small compared to input sizes.\n if (!transposeA && !transposeB &&\n ((outerShapeA <= 16 &&\n (outerShapeB <= 512 || innerShapeB >= 2 * outerShapeB)) ||\n (outerShapeB <= 16 &&\n (outerShapeA <= 512 || innerShapeA >= 2 * outerShapeA)))) {\n program = new MatMulSmallOutputSizeProgram(\n a3dShape, b3dShape, [batchDim, outerShapeA, outerShapeB], bias,\n activation, preluActivationWeights);\n } else if (useVec4) {\n // TODO: Currently we need to make sure that innerShapeA and outerShapeB\n // are divisible by 4 since we use vec4 to get data. 
In future, we can\n // remove this limitation by insert 0 to pack data.\n program = new MatMulPackedVec4Program(\n a3dShape, [batchDim, outerShapeA, outerShapeB],\n env().get('WEBGPU_MATMUL_WORK_PER_THREAD') as number, batchAEqualOne,\n batchBEqualOne, bias, activation, preluActivationWeights);\n } else {\n program = new MatMulPackedProgram(\n a3dShape, [batchDim, outerShapeA, outerShapeB],\n env().get('WEBGPU_MATMUL_WORK_PER_THREAD') as number, batchAEqualOne,\n batchBEqualOne, transposeA, transposeB, bias, activation,\n preluActivationWeights);\n }\n const inputs: TensorInfo[] = [a3d, b3d];\n if (bias) {\n inputs.push(bias);\n }\n if (preluActivationWeights) {\n inputs.push(preluActivationWeights);\n }\n const dimensions = [\n {type: 'int32', data: [outerShapeA]}, {type: 'int32', data: [outerShapeB]},\n {type: 'int32', data: [innerShapeA]}\n ];\n if (activation === 'leakyrelu') {\n dimensions.push({type: 'float32', data: [leakyreluAlpha]});\n program.uniforms += ' alpha : f32,';\n }\n const out = backend.runWebGPUProgram(program, inputs, a.dtype, dimensions);\n const outReshaped =\n reshape({inputs: {x: out}, backend, attrs: {shape: outShape}});\n intermediates.push(out);\n for (const i of intermediates) {\n backend.disposeData(i.dataId);\n }\n return outReshaped;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {_FusedMatMul, _FusedMatMulAttrs, _FusedMatMulInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {batchMatMulImpl} from './BatchMatMul_impl';\n\nexport function _fusedMatMul(args: {\n inputs: _FusedMatMulInputs,\n attrs: _FusedMatMulAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {a, b, bias, preluActivationWeights} = inputs;\n const {transposeA, transposeB, activation, leakyreluAlpha} = attrs;\n\n return batchMatMulImpl({\n a,\n b,\n transposeA,\n transposeB,\n backend,\n bias,\n preluActivationWeights,\n leakyreluAlpha,\n activation\n });\n}\n\nexport const _fusedMatMulConfig: KernelConfig = {\n kernelName: _FusedMatMul,\n backendName: 'webgpu',\n kernelFunc: _fusedMatMul as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\nimport {BinaryOpType, getBinaryOpString} from './binary_op_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class BinaryOpComplexProgram implements WebGPUProgram {\n variableNames = ['AReal', 'AImag', 'BReal', 'BImag'];\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [128, 1, 1];\n op: BinaryOpType;\n size = true;\n\n constructor(op: BinaryOpType, aShape: number[], bShape: number[]) {\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.shaderKey = `binaryOpComplex_${op}`;\n this.op = op;\n }\n\n getUserCode(): string {\n const opStr = getBinaryOpString(this.op, false);\n const userCode = `\n fn binaryOpComplex(\n areal : f32, aimag : f32, breal : f32, bimag : f32) -> f32 {\n ${opStr}\n }\n\n ${getMainHeaderAndGlobalIndexString()}\n if(index < uniforms.size) {\n let areal = getARealByOutputIndex(index);\n let aimag = getAImagByOutputIndex(index);\n let breal = getBRealByOutputIndex(index);\n let bimag = getBImagByOutputIndex(index);\n setOutputAtIndex(index, binaryOpComplex(areal, aimag, breal, bimag));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {BinaryOpType, getBinaryOpString} from './binary_op_util';\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class BinaryOpSharedProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['A', 'B'];\n workPerThread: number;\n workGroupSize: [number, number, number];\n useSharedMemoryWithB: boolean;\n lastDimensionSize: number;\n op: BinaryOpType;\n size = true;\n\n constructor(\n op: BinaryOpType, aShape: number[], bShape: number[],\n useSharedMemoryWithB: boolean) {\n // This is an experimental value when using shared memory.\n // Note that the maximum of workgroup X dimension is 256.\n const workGroupSizeX = 256;\n this.workGroupSize = [workGroupSizeX, 1, 1];\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.lastDimensionSize = useSharedMemoryWithB ? bShape[0] : aShape[0];\n if (this.lastDimensionSize < 256) {\n this.workPerThread = 1;\n } else if (this.lastDimensionSize < 512) {\n this.workPerThread = 2;\n } else {\n this.workPerThread = 4;\n }\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n\n this.useSharedMemoryWithB = useSharedMemoryWithB;\n this.op = op;\n // this.lastDimensionSize is used as sharedBuf array size, so can not be\n // used as uniform.\n this.shaderKey = `binaryShared_${op}_${this.lastDimensionSize}_${\n this.useSharedMemoryWithB}`;\n }\n\n getUserCode(): string {\n const sharedIndexSnippet = this.lastDimensionSize > 1 ?\n `coords[${this.outputShape.length - 1}]` :\n '0';\n const accessDataSnippet = this.useSharedMemoryWithB ?\n `let a = getAByOutputCoords(coords);\n let b = sharedBuf[${sharedIndexSnippet}];` :\n `let a = sharedBuf[${sharedIndexSnippet}];\n let b = getBByOutputCoords(coords);`;\n\n const opStr = getBinaryOpString(this.op, false);\n const userCode = `\n fn binaryOperation(a : f32, b : f32) -> f32 {\n ${opStr}\n }\n var sharedBuf : array;\n ${getMainHeaderAndGlobalIndexString()}\n\n // Fill in the shared memory buffer. Here we need a loop to make sure\n // that all data in A|B are uploaded when |sharedMemorySize| is larger\n // than work group size.\n for(var localIndex = i32(localId.x); localIndex < ${\n this.lastDimensionSize}; localIndex = localIndex + ${\n this.workGroupSize[0]}) {\n sharedBuf[localIndex] = f32(${\n this.useSharedMemoryWithB ? 
'B' : 'A'}[localIndex]);\n }\n workgroupBarrier();\n\n for(var i = 0; i < ${this.workPerThread}; i = i + 1) {\n let flatIndex = index * ${this.workPerThread} + i;\n if(flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndex);\n\n ${accessDataSnippet}\n setOutputAtIndex(flatIndex, binaryOperation(a, b));\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\nimport {BinaryOpType, getBinaryOpString} from './binary_op_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class BinaryOpVec4Program implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['A', 'B'];\n workPerThread = 4;\n workGroupSize: [number, number, number];\n isVec4 = true;\n op: BinaryOpType;\n size = true;\n fitShape: boolean;\n\n constructor(op: BinaryOpType, aShape: number[], bShape: number[]) {\n // TODO(jiajia.qin@intel.com): Heuristically select a good work group size.\n const workGroupSizeX = 128;\n this.workGroupSize = [workGroupSizeX, 1, 1];\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n this.op = op;\n this.shaderKey = `binaryVec4_${op}`;\n }\n\n getUserCode(): string {\n const opStr = getBinaryOpString(this.op, this.isVec4);\n const userCode = `\n fn binaryOperation(a : vec4, b : vec4) -> vec4 {\n ${opStr}\n }\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let a = getAByOutputIndex(index);\n let b = getBByOutputIndex(index);\n setOutputAtIndex(index, binaryOperation(a, b));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\nimport {BinaryOpType, getBinaryOpString} from './binary_op_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class BinaryOpProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['A', 'B'];\n workGroupSize: [number, number, number];\n op: BinaryOpType;\n size = true;\n\n constructor(op: BinaryOpType, aShape: number[], bShape: number[]) {\n // TODO(jiajia.qin@intel.com): Heuristically select a good work group size.\n const workGroupSizeX = 128;\n this.workGroupSize = [workGroupSizeX, 1, 1];\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.shaderKey = `binary_${op}`;\n this.op = op;\n }\n\n getUserCode(): string {\n const opStr = getBinaryOpString(this.op, false);\n const userCode = `\n fn binaryOperation(a : f32, b : f32) -> f32 {\n ${opStr}\n }\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let a = getAByOutputIndex(index);\n let b = getBByOutputIndex(index);\n setOutputAtIndex(index, binaryOperation(a, b));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {util} from '@tensorflow/tfjs-core';\nimport {BinaryOpSharedProgram} from './binary_op_shared_webgpu';\nimport {BinaryOpVec4Program} from './binary_op_vec4_webgpu';\nimport {BinaryOpProgram} from './binary_op_webgpu';\nimport {BinaryOpType} from './binary_op_util';\n\nexport function getBinaryProgram(\n op: BinaryOpType, aShape: number[], bShape: number[]) {\n const useVec4 =\n util.arraysEqual(aShape, bShape) && util.sizeFromShape(aShape) % 4 === 0;\n if (useVec4) {\n return new BinaryOpVec4Program(op, aShape, bShape);\n }\n const useSharedMemoryWithA =\n aShape.length === 1 && bShape.length > 1 && aShape[0] < 1024;\n const useSharedMemoryWithB =\n bShape.length === 1 && aShape.length > 1 && bShape[0] < 1024;\n if (useSharedMemoryWithA || useSharedMemoryWithB) {\n return new BinaryOpSharedProgram(op, aShape, bShape, useSharedMemoryWithB);\n } else {\n return new BinaryOpProgram(op, aShape, bShape);\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Identity, IdentityInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\nimport {WebGPUBackend} from '../backend_webgpu';\n\nexport function identity(\n args: {inputs: IdentityInputs, backend: WebGPUBackend}): TensorInfo {\n const {inputs} = args;\n const {x} = inputs;\n\n args.backend.incRef(x.dataId);\n return {dataId: x.dataId, shape: x.shape, dtype: x.dtype};\n}\n\nexport const identityConfig: KernelConfig = {\n kernelName: Identity,\n backendName: 'webgpu',\n kernelFunc: identity as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Complex, ComplexInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {identity} from './Identity';\n\n/**\n * Complex tensors share data with their real and imaginary components. Complex\n * tensors' reference to the components is tracked by refCount on the individual\n * component. The refCounts are increased by the identity call.\n *\n * When a complex tensor is disposed, it will reduce the refCount on the\n * components by calling disposeData on each.\n */\nexport function complex(args: {inputs: ComplexInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n const {real, imag} = inputs;\n\n const complexInfo = backend.makeTensorInfo(real.shape, 'complex64');\n const complex = backend.tensorMap.get(complexInfo.dataId);\n\n const realTensorInfo = identity({inputs: {x: real}, backend});\n\n const imagTensorInfo = identity({inputs: {x: imag}, backend});\n\n complex.complexTensorInfos = {real: realTensorInfo, imag: imagTensorInfo};\n\n return complexInfo;\n}\n\nexport const complexConfig: KernelConfig = {\n kernelName: Complex,\n backendName: 'webgpu',\n kernelFunc: complex as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nimport {getUnaryOpString, UnaryOpType} from './unary_op_util';\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class UnaryOpProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['A'];\n workGroupSize: [number, number, number];\n op: UnaryOpType;\n uniforms?: string;\n size = true;\n\n constructor(outputShape: number[], op: UnaryOpType) {\n // TODO(jiajia.qin@intel.com): Heuristically select a good work group size.\n const workGroupSizeX = 128;\n this.workGroupSize = [workGroupSizeX, 1, 1];\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.op = op;\n this.shaderKey = `unary_${op}`;\n }\n\n getUserCode(): string {\n return `\n fn unaryOperation(a : f32) -> f32 {\n ${getUnaryOpString(this.op, false)}\n }\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let a = getAByOutputIndex(index);\n setOutputAtIndex(index, unaryOperation(a));\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BinaryInputs, DataType, KernelFunc, TensorInfo, TypedArray, UnaryInputs, upcastType} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {BinaryOpComplexProgram} from '../binary_op_complex_webgpu';\nimport {getBinaryProgram} from '../binary_ops';\nimport {complex} from '../kernels/Complex';\nimport {BinaryOpType} from '../binary_op_util';\nimport {UnaryOpType} from '../unary_op_util';\nimport {UnaryOpProgram} from '../unary_op_webgpu';\n\nimport {SimpleBinaryKernelImplCPU, SimpleUnaryKernelImplCPU} from './shared';\n\ntype UnaryKernelFuncConfig = {\n opType: UnaryOpType,\n cpuKernelImpl?: SimpleUnaryKernelImplCPU,\n dtype?: DataType\n};\n\n/**\n * Template that creates a `KernelFunc` for unary ops.\n * @param opSnippet Op snippet to create `UnaryOpProgram`.\n * @param cpuKernelImpl Optional. Shared functionality from tfjs-backend-cpu, it\n * will be involved when necessary.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the first input. This is mainly used in\n * comparison kernels, such as Equal, Less, Greater, etc.\n */\nexport function unaryKernelFunc(\n {opType, cpuKernelImpl, dtype}: UnaryKernelFuncConfig): KernelFunc {\n return ({inputs, backend}) => {\n const {x} = inputs as UnaryInputs;\n const webgpuBackend = backend as WebGPUBackend;\n\n const $dtype = dtype || x.dtype;\n if (webgpuBackend.shouldExecuteOnCPU([x]) && cpuKernelImpl != null) {\n const xData = webgpuBackend.tensorMap.get(x.dataId);\n const outValues = cpuKernelImpl(xData.values as TypedArray, $dtype);\n return webgpuBackend.makeTensorInfo(x.shape, $dtype, outValues);\n }\n\n const program: UnaryOpProgram = new UnaryOpProgram(x.shape, opType);\n return webgpuBackend.runWebGPUProgram(program, [x], $dtype);\n };\n}\n\ntype BinaryKernelFuncConfig = {\n opSnippet: number,\n cpuKernelImpl?: SimpleBinaryKernelImplCPU,\n supportsComplex?: boolean,\n dtype?: DataType\n};\n\n/**\n * Template that creates a `KernelFunc` for binary ops.\n * @param opSnippet Op snippet to create `BinaryOpProgram`.\n * @param cpuKernelImpl Optional. Shared functionality from tfjs-backend-cpu, it\n * will be involved when necessary.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the first input. 
This is mainly used in\n * comparison kernels, such as Equal, Less, Greater, etc.\n */\nexport function binaryKernelFunc(\n {opSnippet, cpuKernelImpl, supportsComplex = false, dtype}:\n BinaryKernelFuncConfig): KernelFunc {\n return ({inputs, backend}) => {\n const {a, b} = inputs as BinaryInputs;\n const webgpuBackend = backend as WebGPUBackend;\n\n if (supportsComplex && a.dtype === 'complex64') {\n const aData = webgpuBackend.tensorMap.get(a.dataId);\n const bData = webgpuBackend.tensorMap.get(b.dataId);\n let real: TensorInfo, imag: TensorInfo;\n if (opSnippet !== BinaryOpType.MUL) {\n [real, imag] = [\n [aData.complexTensorInfos.real, bData.complexTensorInfos.real],\n [aData.complexTensorInfos.imag, bData.complexTensorInfos.imag]\n ].map(complexParts => {\n const [aPart, bPart] = complexParts;\n\n const aHandle = {\n dataId: aPart.dataId,\n dtype: aPart.dtype,\n shape: a.shape\n };\n const bHandle = {\n dataId: bPart.dataId,\n dtype: bPart.dtype,\n shape: b.shape\n };\n\n const program = getBinaryProgram(opSnippet, a.shape, b.shape);\n return webgpuBackend.runWebGPUProgram(\n program, [aHandle, bHandle],\n upcastType(aPart.dtype, bPart.dtype));\n });\n } else {\n const realProgram = new BinaryOpComplexProgram(\n BinaryOpType.COMPLEX_MULTIPLY_REAL, a.shape, b.shape);\n const imagProgram = new BinaryOpComplexProgram(\n BinaryOpType.COMPLEX_MULTIPLY_IMAG, a.shape, b.shape);\n\n const inputs = [\n {\n dataId: aData.complexTensorInfos.real.dataId,\n dtype: aData.complexTensorInfos.real.dtype,\n shape: a.shape\n },\n {\n dataId: aData.complexTensorInfos.imag.dataId,\n dtype: aData.complexTensorInfos.imag.dtype,\n shape: a.shape\n },\n {\n dataId: bData.complexTensorInfos.real.dataId,\n dtype: bData.complexTensorInfos.real.dtype,\n shape: b.shape\n },\n {\n dataId: bData.complexTensorInfos.imag.dataId,\n dtype: bData.complexTensorInfos.imag.dtype,\n shape: b.shape\n }\n ];\n\n real = webgpuBackend.runWebGPUProgram(realProgram, inputs, 'float32');\n imag = webgpuBackend.runWebGPUProgram(imagProgram, inputs, 'float32');\n }\n\n const complexOutput =\n complex({inputs: {real, imag}, backend: webgpuBackend});\n\n webgpuBackend.disposeData(real.dataId);\n webgpuBackend.disposeData(imag.dataId);\n\n // TODO: Implement CPU forwarding for complex inputs.\n\n return complexOutput;\n }\n\n const $dtype = dtype || upcastType(a.dtype, b.dtype);\n if ((a.dtype === 'string' || b.dtype === 'string' ||\n webgpuBackend.shouldExecuteOnCPU([a, b])) &&\n cpuKernelImpl != null) {\n const aData = webgpuBackend.tensorMap.get(a.dataId).values as TypedArray;\n const bData = webgpuBackend.tensorMap.get(b.dataId).values as TypedArray;\n const decodedAVals = a.dtype === 'string' ?\n // tslint:disable-next-line: no-any\n backend_util.fromUint8ToStringArray(aData as any as Uint8Array[]) :\n aData;\n const decodedBVals = a.dtype === 'string' ?\n // tslint:disable-next-line: no-any\n backend_util.fromUint8ToStringArray(bData as any as Uint8Array[]) :\n bData;\n const [outValues, outShape] =\n cpuKernelImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype);\n\n return webgpuBackend.makeTensorInfo(outShape, $dtype, outValues);\n }\n const program = getBinaryProgram(opSnippet, a.shape, b.shape);\n return webgpuBackend.runWebGPUProgram(program, [a, b], $dtype);\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Abs, AbsInputs, KernelConfig, KernelFunc, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function simpleAbsImpl(vals: TypedArray): Float32Array {\n const resultValues = new Float32Array(vals.length);\n for (let i = 0; i < vals.length; ++i) {\n resultValues[i] = Math.abs(vals[i]);\n }\n return resultValues;\n}\n\nexport const abs = (args: {inputs: AbsInputs, backend: MathBackendCPU}) => {\n const {x} = args.inputs;\n const cpuBackend = args.backend;\n\n assertNotComplex(x, 'abs');\n\n let resultValues = new Float32Array(util.sizeFromShape(x.shape));\n const values = cpuBackend.data.get(x.dataId).values as TypedArray;\n resultValues = simpleAbsImpl(values);\n\n return cpuBackend.makeOutput(resultValues, x.shape, x.dtype);\n};\n\nexport const absConfig: KernelConfig = {\n kernelName: Abs,\n backendName: 'cpu',\n kernelFunc: abs as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, DataValues, NumericDataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {SimpleBinaryKernelImpl, SimpleBinaryOperation} from './binary_types';\n\n/**\n * Template that creates implementation for binary ops. 
Supports broadcast.\n */\nexport function createSimpleBinaryKernelImpl(op: SimpleBinaryOperation):\n SimpleBinaryKernelImpl {\n return (aShape: number[], bShape: number[], aVals: DataValues,\n bVals: DataValues, dtype: DataType): [TypedArray, number[]] => {\n const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n\n const resultRank = newShape.length;\n const resultStrides = util.computeStrides(newShape);\n const resultSize = util.sizeFromShape(newShape);\n\n const result =\n util.getTypedArrayFromDType(dtype as NumericDataType, resultSize);\n\n const aRank = aShape.length;\n const bRank = bShape.length;\n\n const aStrides = util.computeStrides(aShape);\n const bStrides = util.computeStrides(bShape);\n\n const aBroadcastDims = backend_util.getBroadcastDims(aShape, newShape);\n const bBroadcastDims = backend_util.getBroadcastDims(bShape, newShape);\n\n if (aBroadcastDims.length + bBroadcastDims.length === 0) {\n for (let i = 0; i < result.length; ++i) {\n result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]);\n }\n } else {\n for (let i = 0; i < result.length; ++i) {\n const loc = util.indexToLoc(i, resultRank, resultStrides);\n\n const aLoc = loc.slice(-aRank);\n aBroadcastDims.forEach(d => aLoc[d] = 0);\n const aIndex = util.locToIndex(aLoc, aRank, aStrides);\n\n const bLoc = loc.slice(-bRank);\n bBroadcastDims.forEach(d => bLoc[d] = 0);\n const bIndex = util.locToIndex(bLoc, bRank, bStrides);\n\n result[i] = op(aVals[aIndex], bVals[bIndex]);\n }\n }\n\n return [result, newShape];\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Add, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc, createComplexBinaryKernelImpl} from '../utils/binary_utils';\n\nexport const addImpl =\n createSimpleBinaryKernelImpl(((a: number, b: number) => a + b));\nexport const addComplexImpl =\n createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => {\n return {real: aReal + bReal, imag: aImag + bImag};\n }));\n\nexport const add = binaryKernelFunc(Add, addImpl, addComplexImpl);\n\nexport const addConfig: KernelConfig = {\n kernelName: Add,\n backendName: 'cpu',\n kernelFunc: add\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {NumericDataType, util} from '@tensorflow/tfjs-core';\n\nimport {SimpleUnaryImpl, SimpleUnaryOperation} from './unary_types';\n\n/**\n * Template that creates implementation for unary op.\n */\nexport function createSimpleUnaryImpl(op: SimpleUnaryOperation):\n SimpleUnaryImpl {\n return (values, dtype, attrs) => {\n const newValues =\n util.getTypedArrayFromDType(dtype as NumericDataType, values.length);\n for (let i = 0; i < values.length; ++i) {\n newValues[i] = op(values[i], attrs);\n }\n return newValues;\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Ceil, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const ceilImpl = createSimpleUnaryImpl((xi) => Math.ceil(xi));\nexport const ceil = unaryKernelFuncFromImpl(Ceil, ceilImpl);\n\nexport const ceilConfig: KernelConfig = {\n kernelName: Ceil,\n backendName: 'cpu',\n kernelFunc: ceil,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BackendValues, DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function concatImpl(\n inputs: Array<{vals: BackendValues, shape: number[]}>, outShape: number[],\n dtype: DataType, simplyConcat: boolean): TypedArray|string[] {\n const outVals = util.getArrayFromDType(dtype, util.sizeFromShape(outShape));\n\n if (simplyConcat && dtype !== 'string') {\n // Use built-in TypedArray.set() method for speed.\n let offset = 0;\n inputs.forEach(input => {\n const size = util.sizeFromShape(input.shape);\n\n (outVals as TypedArray).set(input.vals as TypedArray, offset);\n offset += size;\n });\n } else {\n let colOffset = 0;\n\n inputs.forEach(input => {\n const decodedData = dtype === 'string' ?\n backend_util.fromUint8ToStringArray(input.vals as Uint8Array[]) :\n input.vals as TypedArray;\n\n let tIdx = 0;\n\n for (let row = 0; row < input.shape[0]; ++row) {\n const resIdx = row * outShape[1] + colOffset;\n for (let col = 0; col < input.shape[1]; ++col) {\n outVals[resIdx + col] = decodedData[tIdx++];\n }\n }\n\n colOffset += input.shape[1];\n });\n }\n\n return outVals;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Equal, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const equalImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a === b) ? 1 : 0);\nexport const equal =\n binaryKernelFunc(Equal, equalImpl, null /* complexImpl */, 'bool');\n\nexport const equalConfig: KernelConfig = {\n kernelName: Equal,\n backendName: 'cpu',\n kernelFunc: equal\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Exp, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const expImpl = createSimpleUnaryImpl((xi) => Math.exp(xi));\nexport const exp = unaryKernelFuncFromImpl(Exp, expImpl, 'float32');\n\nexport const expConfig: KernelConfig = {\n kernelName: Exp,\n backendName: 'cpu',\n kernelFunc: exp,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Expm1, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const expm1Impl = createSimpleUnaryImpl((xi) => Math.expm1(xi));\nexport const expm1 = unaryKernelFuncFromImpl(Expm1, expm1Impl);\n\nexport const expm1Config: KernelConfig = {\n kernelName: Expm1,\n backendName: 'cpu',\n kernelFunc: expm1,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Floor, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const floorImpl = createSimpleUnaryImpl((xi) => Math.floor(xi));\nexport const floor = unaryKernelFuncFromImpl(Floor, floorImpl);\n\nexport const floorConfig: KernelConfig = {\n kernelName: Floor,\n backendName: 'cpu',\n kernelFunc: floor,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer, TypedArray} from '@tensorflow/tfjs-core';\n\nexport function gatherNdImpl(\n indicesData: TypedArray, paramsBuf: TensorBuffer, dtype: DataType,\n numSlices: number, sliceRank: number, sliceSize: number, strides: number[],\n paramsShape: number[], paramsSize: number): TensorBuffer {\n const outBuf = buffer([numSlices, sliceSize], dtype);\n\n for (let i = 0; i < numSlices; i++) {\n const index = [];\n let flattenIndex = 0;\n for (let j = 0; j < sliceRank; j++) {\n const dim = indicesData[i * sliceRank + j];\n flattenIndex += dim * strides[j];\n index.push(dim);\n }\n if (flattenIndex < 0 || flattenIndex >= paramsSize / sliceSize) {\n throw new Error(\n `Invalid indices: ${index} does not index into ${paramsShape}`);\n }\n\n for (let k = 0; k < sliceSize; k++) {\n outBuf.values[i * sliceSize + k] =\n paramsBuf.get(...paramsBuf.indexToLoc(flattenIndex * sliceSize + k));\n }\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer} from '@tensorflow/tfjs-core';\n\nexport function gatherV2Impl(\n xBuf: TensorBuffer, indicesBuf: TensorBuffer,\n flattenOutputShape: number[]): TensorBuffer {\n const outBuf = buffer(flattenOutputShape, xBuf.dtype);\n for (let i = 0; i < outBuf.size; ++i) {\n const newLoc = outBuf.indexToLoc(i);\n\n const originalLoc: number[] = newLoc.slice();\n const batchIdx = originalLoc[0];\n const indicesIdx = originalLoc[2];\n const indicesIndex = indicesBuf.locToIndex([batchIdx, indicesIdx]);\n originalLoc[2] = indicesBuf.values[indicesIndex] as number;\n\n const originalIndex = xBuf.locToIndex(originalLoc);\n\n if (0 <= originalIndex && originalIndex < xBuf.values.length) {\n outBuf.values[i] = xBuf.values[originalIndex];\n } // Else, index is out of bounds, so leave the default zero val in outBuf.\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Greater, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const greaterImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a > b) ? 1 : 0);\nexport const greater =\n binaryKernelFunc(Greater, greaterImpl, null /* complexImpl */, 'bool');\n\nexport const greaterConfig: KernelConfig = {\n kernelName: Greater,\n backendName: 'cpu',\n kernelFunc: greater\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GreaterEqual, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const greaterEqualImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a >= b) ? 1 : 0);\nexport const greaterEqual = binaryKernelFunc(\n GreaterEqual, greaterEqualImpl, null /* complexImpl */, 'bool');\n\nexport const greaterEqualConfig: KernelConfig = {\n kernelName: GreaterEqual,\n backendName: 'cpu',\n kernelFunc: greaterEqual\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Less} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const lessImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a < b) ? 
1 : 0);\nexport const less =\n binaryKernelFunc(Less, lessImpl, null /* complexImpl */, 'bool');\n\nexport const lessConfig: KernelConfig = {\n kernelName: Less,\n backendName: 'cpu',\n kernelFunc: less\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LessEqual} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const lessEqualImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a <= b) ? 1 : 0);\nexport const lessEqual =\n binaryKernelFunc(LessEqual, lessEqualImpl, null /* complexImpl */, 'bool');\n\nexport const lessEqualConfig: KernelConfig = {\n kernelName: LessEqual,\n backendName: 'cpu',\n kernelFunc: lessEqual\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Log} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const logImpl = createSimpleUnaryImpl((xi) => Math.log(xi));\nexport const log = unaryKernelFuncFromImpl(Log, logImpl);\n\nexport const logConfig: KernelConfig = {\n kernelName: Log,\n backendName: 'cpu',\n kernelFunc: log,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, NumericDataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function maxImpl(\n aVals: TypedArray, reduceSize: number, outShape: number[],\n dtype: DataType): TypedArray {\n const vals = util.getTypedArrayFromDType(\n dtype as NumericDataType, util.sizeFromShape(outShape));\n\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let max = aVals[offset];\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n if (Number.isNaN(value) ||\n value > max) { // comparison with NaN always return false\n max = value;\n }\n }\n vals[i] = max;\n }\n return vals;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Maximum} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const maximumImpl = createSimpleBinaryKernelImpl(\n ((aValue, bValue) => Math.max(aValue as number, bValue as number)));\nexport const maximum = binaryKernelFunc(Maximum, maximumImpl);\n\nexport const maximumConfig: KernelConfig = {\n kernelName: Maximum,\n backendName: 'cpu',\n kernelFunc: maximum\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Minimum} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const minimumImpl = createSimpleBinaryKernelImpl(\n ((aValue, bValue) => Math.min(aValue as number, bValue as number)));\nexport const minimum = binaryKernelFunc(Minimum, minimumImpl);\n\nexport const minimumConfig: KernelConfig = {\n kernelName: Minimum,\n backendName: 'cpu',\n kernelFunc: minimum\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Multiply} from '@tensorflow/tfjs-core';\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc, createComplexBinaryKernelImpl} from '../utils/binary_utils';\n\nexport const multiplyImpl = createSimpleBinaryKernelImpl(\n ((aValue: number, bValue: number) => aValue * bValue));\nexport const multiplyComplexImpl =\n createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => {\n return {\n real: aReal * bReal - aImag * bImag,\n imag: aReal * bImag + aImag * bReal\n };\n }));\n\nexport const multiply =\n binaryKernelFunc(Multiply, multiplyImpl, multiplyComplexImpl);\n\nexport const multiplyConfig: KernelConfig = {\n kernelName: Multiply,\n backendName: 'cpu',\n kernelFunc: multiply\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, KernelConfig, KernelFunc, Neg, TensorInfo, TypedArray, UnaryInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {multiplyImpl} from './Multiply';\n\nexport function negImpl(xVals: TypedArray, xShape: number[], xDtype: DataType):\n [TypedArray, number[]] {\n const minusOne =\n util.createScalarValue(-1 as {} as 'float32', xDtype) as TypedArray;\n return multiplyImpl([], xShape, minusOne, xVals, xDtype);\n}\n\nexport function neg(args: {inputs: UnaryInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n assertNotComplex(x, 'neg');\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const [res, newShape] = negImpl(xVals, x.shape, x.dtype);\n\n return backend.makeTensorInfo(newShape, x.dtype, res);\n}\n\nexport const negConfig: KernelConfig = {\n kernelName: Neg,\n backendName: 'cpu',\n kernelFunc: neg as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, NotEqual} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const notEqualImpl =\n createSimpleBinaryKernelImpl(((a, b) => (a !== b) ? 1 : 0));\nexport const notEqual =\n binaryKernelFunc(NotEqual, notEqualImpl, null /* complexOp */, 'bool');\n\nexport const notEqualConfig: KernelConfig = {\n kernelName: NotEqual,\n backendName: 'cpu',\n kernelFunc: notEqual\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, NumericDataType, TypedArray} from '@tensorflow/tfjs-core';\nimport {util} from '@tensorflow/tfjs-core';\n\nexport function transposeImpl(\n xVals: TypedArray, xShape: number[], dtype: DataType, perm: number[],\n newShape: number[]): TypedArray {\n const xRank = xShape.length;\n const xSize = util.sizeFromShape(xShape);\n const xStrides = util.computeStrides(xShape);\n const newStrides = util.computeStrides(newShape);\n\n const result = util.getTypedArrayFromDType(\n dtype as NumericDataType, util.sizeFromShape(newShape));\n\n for (let i = 0; i < xSize; ++i) {\n const loc = util.indexToLoc(i, xRank, xStrides);\n\n // Permute location.\n const newLoc: number[] = new Array(loc.length);\n for (let i = 0; i < newLoc.length; i++) {\n newLoc[i] = loc[perm[i]];\n }\n\n const newIndex = util.locToIndex(newLoc, xRank, newStrides);\n result[newIndex] = xVals[i];\n }\n return result;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, KernelConfig, KernelFunc, Prod, ProdAttrs, ProdInputs, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {transpose} from './Transpose';\n\nexport function prodImpl(\n xShape: number[], xDtype: DataType, xVals: TypedArray,\n reductionAxes: number[]):\n {outVals: TypedArray, outShape: number[], outDtype: DataType} {\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(xShape, reductionAxes);\n const outDtype = upcastType(xDtype, 'int32');\n const outVals = util.makeZerosTypedArray(\n util.sizeFromShape(outShape), outDtype) as TypedArray;\n const reduceSize = util.sizeFromShape(reduceShape);\n\n for (let i = 0; i < outVals.length; ++i) {\n const offset = i * reduceSize;\n let prod = 1;\n for (let j = 0; j < reduceSize; ++j) {\n prod *= xVals[offset + j];\n }\n outVals[i] = prod;\n }\n\n return {outVals, outShape, outDtype};\n}\n\nexport function prod(\n args: {inputs: ProdInputs, backend: MathBackendCPU, attrs: ProdAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n 
assertNotComplex(x, 'prod');\n\n const xRank = x.shape.length;\n const axes = util.parseAxisParam(axis, x.shape);\n\n const permutation = backend_util.getAxesPermutation(axes, xRank);\n let reductionAxes = axes;\n let permutedX = x;\n const intermediateTensorInfos = [];\n if (permutation != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n intermediateTensorInfos.push(permutedX);\n reductionAxes = backend_util.getInnerMostAxes(reductionAxes.length, xRank);\n }\n\n const xVals = backend.data.get(permutedX.dataId).values as TypedArray;\n const {outVals, outShape, outDtype} =\n prodImpl(permutedX.shape, permutedX.dtype, xVals, reductionAxes);\n\n let resultShape = outShape;\n if (keepDims) {\n resultShape = backend_util.expandShapeToKeepDim(outShape, axes);\n }\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n\n return backend.makeTensorInfo(resultShape, outDtype, outVals);\n}\n\nexport const prodConfig: KernelConfig = {\n kernelName: Prod,\n backendName: 'cpu',\n kernelFunc: prod as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataTypeMap, util} from '@tensorflow/tfjs-core';\n\nexport function rangeImpl(\n start: number, stop: number, step: number,\n dtype: 'float32'|'int32'): DataTypeMap['float32' | 'int32'] {\n const sameStartStop = start === stop;\n const increasingRangeNegativeStep = start < stop && step < 0;\n const decreasingRangePositiveStep = stop < start && step > 1;\n\n if (sameStartStop || increasingRangeNegativeStep ||\n decreasingRangePositiveStep) {\n return util.makeZerosTypedArray(0, dtype);\n }\n\n const numElements = Math.abs(Math.ceil((stop - start) / step));\n const values = util.makeZerosTypedArray(numElements, dtype);\n\n if (stop < start && step === 1) {\n // Auto adjust the step's sign if it hasn't been set\n // (or was set to 1)\n step = -1;\n }\n\n values[0] = start;\n for (let i = 1; i < values.length; i++) {\n values[i] = values[i - 1] + step;\n }\n return values;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Rsqrt} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const rsqrtImpl = createSimpleUnaryImpl((xi) => 1 / Math.sqrt(xi));\nexport const rsqrt = unaryKernelFuncFromImpl(Rsqrt, rsqrtImpl);\n\nexport const rsqrtConfig: KernelConfig = {\n kernelName: Rsqrt,\n backendName: 'cpu',\n kernelFunc: rsqrt,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BackendValues, buffer, DataType, KernelConfig, KernelFunc, Slice, slice_util, SliceAttrs, SliceInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function sliceImpl(\n vals: BackendValues, begin: number[], size: number[], shape: number[],\n dtype: DataType): BackendValues {\n const isContinous = slice_util.isSliceContinous(shape, begin, size);\n const length = util.sizeFromShape(size);\n const xStrides = util.computeStrides(shape);\n\n if (isContinous) {\n const flatOffset = slice_util.computeFlatOffset(begin, xStrides);\n\n if (dtype === 'string') {\n return (vals as Uint8Array[]).slice(flatOffset, flatOffset + length);\n }\n\n return (vals as TypedArray).subarray(flatOffset, flatOffset + length);\n }\n\n const decodedData = dtype === 'string' ?\n backend_util.fromUint8ToStringArray(vals as Uint8Array[]) :\n vals as TypedArray;\n\n const inBuf = buffer(shape, dtype, decodedData);\n const outBuf = buffer(size, dtype);\n for (let i = 0; i < outBuf.size; ++i) {\n const outLoc = outBuf.indexToLoc(i);\n const inLoc = outLoc.map((idx: number, j) => idx + begin[j]);\n outBuf.set(inBuf.get(...inLoc), ...outLoc);\n }\n\n if (dtype === 'string') {\n return backend_util.fromStringArrayToUint8(outBuf.values as string[]);\n }\n return outBuf.values as TypedArray;\n}\n\nexport function slice(\n args: {inputs: SliceInputs, backend: MathBackendCPU, attrs: SliceAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {begin, size} = attrs;\n\n assertNotComplex(x, 'slice');\n\n const [$begin, $size] = 
slice_util.parseSliceParams(x, begin, size);\n slice_util.assertParamsValid(x, $begin, $size);\n\n const vals = backend.data.get(x.dataId).values;\n const outVals = sliceImpl(vals, $begin, $size, x.shape, x.dtype);\n return backend.makeTensorInfo($size, x.dtype, outVals);\n}\n\nexport const sliceConfig: KernelConfig = {\n kernelName: Slice,\n backendName: 'cpu',\n kernelFunc: slice as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, Rank, TensorBuffer} from '@tensorflow/tfjs-core';\n\nexport function stridedSliceImpl(\n outShape: number[], xBuf: TensorBuffer, strides: number[],\n begin: number[]): TensorBuffer {\n const outBuf = buffer(outShape, xBuf.dtype);\n\n for (let i = 0; i < outBuf.size; i++) {\n const loc = outBuf.indexToLoc(i);\n\n const newLoc: number[] = new Array(loc.length);\n for (let j = 0; j < newLoc.length; j++) {\n newLoc[j] = loc[j] * strides[j] + begin[j];\n }\n outBuf.set(xBuf.get(...newLoc), ...loc);\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {util} from '@tensorflow/tfjs-core';\n\n/**\n * The StringNGramsOp class creates ngrams from ragged string data.\n * The constructor contains all attributes related to the operation such as\n * padding widths and strings, and the compute function can be used to\n * compute the ngrams for different ragged tensor inputs.\n */\nclass StringNGramsOp {\n private separator: Uint8Array;\n private nGramWidths: number[];\n private padWidth: number;\n private leftPad: Uint8Array;\n private rightPad: Uint8Array;\n private preserveShort: boolean;\n\n constructor(\n separator: string, nGramWidths: number[], leftPad: string,\n rightPad: string, padWidth: number, preserveShortSequences: boolean) {\n this.separator = util.encodeString(separator);\n this.nGramWidths = nGramWidths;\n this.leftPad = util.encodeString(leftPad);\n this.rightPad = util.encodeString(rightPad);\n this.padWidth = padWidth;\n this.preserveShort = preserveShortSequences;\n }\n\n private getPadWidth(nGramWidth: number) {\n // Ngrams can be padded with either a fixed pad width or a dynamic pad\n // width depending on the 'padWidth' arg, but in no case 
should the padding\n // ever be wider than 'nGramWidth' - 1.\n return Math.min(\n this.padWidth < 0 ? nGramWidth - 1 : this.padWidth, nGramWidth - 1);\n }\n\n private getNumNGrams(length: number, nGramWidth: number) {\n const padWidth = this.getPadWidth(nGramWidth);\n return Math.max(0, ((length + 2 * padWidth) - nGramWidth) + 1);\n }\n\n private createNGrams(\n data: Uint8Array[], splitIndex: number, output: Uint8Array[],\n outputStartIndex: number, numNGrams: number, nGramWidth: number) {\n for (let nGramIndex = 0; nGramIndex < numNGrams; ++nGramIndex) {\n const padWidth = this.getPadWidth(nGramWidth);\n const leftPadding = Math.max(0, padWidth - nGramIndex);\n const rightPadding =\n Math.max(0, padWidth - (numNGrams - (nGramIndex + 1)));\n const numTokens = nGramWidth - (leftPadding + rightPadding);\n const dataStartIndex =\n splitIndex + (leftPadding > 0 ? 0 : nGramIndex - padWidth);\n\n // Calculate the total expected size of the nGram so we can reserve the\n // correct amount of space in the string.\n let nGramSize = 0;\n // Size of the left padding.\n nGramSize += leftPadding * this.leftPad.length;\n // Size of the tokens.\n for (let n = 0; n < numTokens; ++n) {\n nGramSize += data[dataStartIndex + n].length;\n }\n // Size of the right padding.\n nGramSize += rightPadding * this.rightPad.length;\n // Size of the separators.\n const numSeparators = leftPadding + rightPadding + numTokens - 1;\n nGramSize += numSeparators * this.separator.length;\n\n // Build the nGram.\n output[outputStartIndex + nGramIndex] = new Uint8Array(nGramSize);\n const nGram = output[outputStartIndex + nGramIndex];\n\n let nextNGramIndex = 0;\n const appendToNGram = (str: Uint8Array) =>\n str.forEach((value) => nGram[nextNGramIndex++] = value);\n\n for (let n = 0; n < leftPadding; ++n) {\n appendToNGram(this.leftPad);\n appendToNGram(this.separator);\n }\n // Only output first numTokens - 1 pairs of data and separator\n for (let n = 0; n < numTokens - 1; ++n) {\n appendToNGram(data[dataStartIndex + n]);\n appendToNGram(this.separator);\n }\n // Handle case when there are no tokens or no right padding as these\n // can result in consecutive separators.\n if (numTokens > 0) {\n // If we have tokens, then output last and then pair each separator\n // with the right padding that follows, to ensure nGram ends either with\n // the token or with the right pad.\n appendToNGram(data[dataStartIndex + numTokens - 1]);\n for (let n = 0; n < rightPadding; ++n) {\n appendToNGram(this.separator);\n appendToNGram(this.rightPad);\n }\n } else {\n // If we don't have tokens, then the last item inserted into the nGram\n // has been the separator from the left padding loop above. 
Hence,\n // output right pad and separator and make sure to finish with a\n // padding, not a separator.\n for (let n = 0; n < rightPadding - 1; ++n) {\n appendToNGram(this.rightPad);\n appendToNGram(this.separator);\n }\n appendToNGram(this.rightPad);\n }\n }\n }\n\n // Data and splits together form the definition of the ragged tensor,\n // where data is 1 dimensional and contains the values of the tensor\n // and splits denotes the indices at which each row starts.\n public compute(data: Uint8Array[], splits: Int32Array):\n [Uint8Array[], Int32Array] {\n // Validate that the splits are valid indices into data, only if there are\n // splits specified.\n const inputDataSize = data.length;\n const splitsSize = splits.length;\n if (splitsSize > 0) {\n let prevSplit = splits[0];\n if (prevSplit !== 0) {\n throw new Error(`First split value must be 0, got ${prevSplit}`);\n }\n for (let i = 1; i < splitsSize; ++i) {\n let validSplits = splits[i] >= prevSplit;\n validSplits = validSplits && (splits[i] <= inputDataSize);\n if (!validSplits) {\n throw new Error(`Invalid split value ${splits[i]}, must be in [${\n prevSplit}, ${inputDataSize}]`);\n }\n prevSplit = splits[i];\n }\n if (prevSplit !== inputDataSize) {\n throw new Error(`Last split value must be data size. Expected ${\n inputDataSize}, got ${prevSplit}`);\n }\n }\n\n const numBatchItems = splitsSize - 1;\n const nGramsSplits = util.getArrayFromDType('int32', splitsSize);\n // If there is no data or size, return an empty ragged tensor.\n if (inputDataSize === 0 || splitsSize === 0) {\n const empty: Uint8Array[] = new Array(inputDataSize);\n for (let i = 0; i <= numBatchItems; ++i) {\n nGramsSplits[i] = 0;\n }\n return [empty, nGramsSplits];\n }\n\n nGramsSplits[0] = 0;\n for (let i = 1; i <= numBatchItems; ++i) {\n const length = splits[i] - splits[i - 1];\n let numNGrams = 0;\n this.nGramWidths.forEach((nGramWidth) => {\n numNGrams += this.getNumNGrams(length, nGramWidth);\n });\n if (this.preserveShort && length > 0 && numNGrams === 0) {\n numNGrams = 1;\n }\n nGramsSplits[i] = nGramsSplits[i - 1] + numNGrams;\n }\n\n const nGrams: Uint8Array[] = new Array(nGramsSplits[numBatchItems]);\n\n for (let i = 0; i < numBatchItems; ++i) {\n const splitIndex = splits[i];\n let outputStartIdx = nGramsSplits[i];\n this.nGramWidths.forEach((nGramWidth) => {\n const length = splits[i + 1] - splits[i];\n const numNGrams = this.getNumNGrams(length, nGramWidth);\n this.createNGrams(\n data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth);\n outputStartIdx += numNGrams;\n });\n // If we're preserving short sequences, check to see if no sequence was\n // generated by comparing the current output start idx to the original\n // one (nGramSplitsdata). If no ngrams were generated, then they will\n // be equal (since we increment outputStartIdx by numNGrams every\n // time we create a set of ngrams.)\n if (this.preserveShort && outputStartIdx === nGramsSplits[i]) {\n const dataLength = splits[i + 1] - splits[i];\n // One legitimate reason to not have any ngrams when this.preserveShort\n // is true is if the sequence itself is empty. 
In that case, move on.\n if (dataLength === 0) {\n continue;\n }\n // We don't have to worry about dynamic padding sizes here: if padding\n // was dynamic, every sequence would have had sufficient padding to\n // generate at least one nGram.\n const nGramWidth = dataLength + 2 * this.padWidth;\n const numNGrams = 1;\n this.createNGrams(\n data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth);\n }\n }\n return [nGrams, nGramsSplits];\n }\n}\n\nexport function stringNGramsImpl(\n data: Uint8Array[], dataSplits: Int32Array, separator: string,\n nGramWidths: number[], leftPad: string, rightPad: string, padWidth: number,\n preserveShortSequences: boolean): [Uint8Array[], Int32Array] {\n return new StringNGramsOp(\n separator, nGramWidths, leftPad, rightPad, padWidth,\n preserveShortSequences)\n .compute(data, dataSplits);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sub} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc, createComplexBinaryKernelImpl} from '../utils/binary_utils';\n\nexport const subImpl = createSimpleBinaryKernelImpl(\n ((aValue: number, bValue: number) => aValue - bValue));\nexport const subComplexImpl =\n createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => {\n return {real: aReal - bReal, imag: aImag - bImag};\n }));\nexport const sub = binaryKernelFunc(Sub, subImpl, subComplexImpl);\n\nexport const subConfig: KernelConfig = {\n kernelName: Sub,\n backendName: 'cpu',\n kernelFunc: sub\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer} from '@tensorflow/tfjs-core';\n\n/**\n * An implementation of the tile kernel shared between webgl and cpu for string\n * tensors only.\n */\n\nexport function tileImpl(\n xBuf: TensorBuffer,\n reps: number[]): TensorBuffer {\n const newShape: number[] = new Array(xBuf.rank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = xBuf.shape[i] * reps[i];\n }\n const result = buffer(newShape, xBuf.dtype);\n for (let i = 0; i < result.values.length; ++i) {\n const newLoc = result.indexToLoc(i);\n\n const originalLoc: number[] = new Array(xBuf.rank);\n for (let j = 0; j < originalLoc.length; j++) {\n originalLoc[j] = newLoc[j] % xBuf.shape[j];\n }\n\n const originalIndex = xBuf.locToIndex(originalLoc);\n\n result.values[i] = xBuf.values[originalIndex];\n }\n return result as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/** An implementation of the TopK kernel shared between webgl and cpu. */\n\nimport {buffer, NumericDataType, Rank, ShapeMap, Tensor, TensorBuffer, TypedArray, util} from '@tensorflow/tfjs-core';\n\ntype Pair = {\n value: number,\n index: number\n};\n\nconst comparePair = (a: Pair, b: Pair) => {\n const valueDiff = b.value - a.value;\n return valueDiff === 0 ? 
a.index - b.index : valueDiff;\n};\n\n/**\n * Partitions array where all elements smaller than the (k+1) smallest element\n * are found to the left of it, and all larger to the right of it.\n * Based on the Floyd-Rivest Algorithm, ref:\n * https://en.wikipedia.org/wiki/Floyd%E2%80%93Rivest_algorithm\n * @param array: Array to partition\n * @param left: Left index for the interval\n * @param right: Right index for the interval\n * @param k: Desired index value, where array[k] is the (k+1)th smallest element\n * when left = 0\n */\nfunction select(array: Pair[], k: number, left = 0, right = array.length - 1) {\n while (right > left) {\n // Use select recursively to sample a smaller set of size s\n // the arbitrary constants 600 and 0.5 are used in the original\n // version to minimize execution time.\n if (right - left > 600) {\n const n = right - left + 1;\n const i = k - left + 1;\n const z = Math.log(n);\n const s = 0.5 * Math.exp(2 * z / 3);\n const sd = 0.5 * Math.sqrt(z * s * (n - s) / n) * Math.sign(i - n / 2);\n const newLeft = Math.max(left, Math.floor(k - i * s / n + sd));\n const newRight = Math.min(right, Math.floor(k + (n - i) * s / n + sd));\n select(array, k, newLeft, newRight);\n }\n // partition the elements between left and right around t\n const t = array[k];\n let i = left;\n let j = right;\n\n util.swap(array, left, k);\n\n if (comparePair(array[right], t) > 0) {\n util.swap(array, left, right);\n }\n while (i < j) {\n util.swap(array, i, j);\n i++;\n j--;\n while (comparePair(array[i], t) < 0) {\n i = i + 1;\n }\n while (comparePair(array[j], t) > 0) {\n j = j - 1;\n }\n }\n if (comparePair(array[left], t) === 0) {\n util.swap(array, left, j);\n } else {\n j = j + 1;\n util.swap(array, j, right);\n }\n // Adjust left and right towards the boundaries of the subset\n // containing the (k - left + 1)th smallest element.\n if (j <= k) {\n left = j + 1;\n }\n if (k <= j) {\n right = j - 1;\n }\n }\n}\n\nexport function topKImpl(\n x: TypedArray, xShape: number[], xDtype: NumericDataType, k: number,\n sorted: boolean):\n [TensorBuffer, TensorBuffer] {\n // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim.\n const lastDim = xShape[xShape.length - 1];\n const [batch, size] = [x.length / lastDim, lastDim];\n const allTopKVals = util.getTypedArrayFromDType(xDtype, batch * k);\n const allTopKIndices = util.getTypedArrayFromDType('int32', batch * k);\n\n for (let b = 0; b < batch; b++) {\n const offset = b * size;\n const vals = x.subarray(offset, offset + size);\n\n let valAndInd: Pair[] = new Array(vals.length);\n vals.forEach(\n (value: number, index: number) => valAndInd[index] = {value, index});\n\n if (k < valAndInd.length) {\n select(valAndInd, k);\n valAndInd = valAndInd.slice(0, k);\n }\n\n if (sorted) {\n valAndInd.sort(comparePair);\n }\n \n const outOffset = b * k;\n const topKVals = allTopKVals.subarray(outOffset, outOffset + k);\n const topKIndices = allTopKIndices.subarray(outOffset, outOffset + k);\n for (let i = 0; i < k; i++) {\n topKVals[i] = valAndInd[i].value;\n topKIndices[i] = valAndInd[i].index;\n }\n }\n // Reshape back to the original input shape, except that the last\n // dimension is k.\n const outputShape = xShape.slice();\n outputShape[outputShape.length - 1] = k;\n\n return [\n buffer(outputShape as ShapeMap[R], xDtype, allTopKVals),\n buffer(outputShape as ShapeMap[R], 'int32', allTopKIndices)\n ];\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Import shared functionality from tfjs-backend-cpu without triggering\n// side effects.\n// tslint:disable-next-line: no-imports-from-dist\nimport * as shared from '@tensorflow/tfjs-backend-cpu/dist/shared';\n// tslint:disable-next-line: no-imports-from-dist\nimport {SimpleBinaryKernelImpl} from '@tensorflow/tfjs-backend-cpu/dist/shared';\n// tslint:disable-next-line: no-imports-from-dist\nimport {SimpleUnaryImpl} from '@tensorflow/tfjs-backend-cpu/dist/utils/unary_types';\n\nexport type SimpleBinaryKernelImplCPU = SimpleBinaryKernelImpl;\nexport type SimpleUnaryKernelImplCPU = SimpleUnaryImpl;\nconst {\n addImpl: addImplCPU,\n ceilImpl: ceilImplCPU,\n concatImpl: concatImplCPU,\n equalImpl: equalImplCPU,\n expImpl: expImplCPU,\n expm1Impl: expm1ImplCPU,\n floorImpl: floorImplCPU,\n gatherNdImpl: gatherNdImplCPU,\n gatherV2Impl: gatherV2ImplCPU,\n greaterEqualImpl: greaterEqualImplCPU,\n greaterImpl: greaterImplCPU,\n lessEqualImpl: lessEqualImplCPU,\n lessImpl: lessImplCPU,\n logImpl: logImplCPU,\n maxImpl: maxImplCPU,\n maximumImpl: maximumImplCPU,\n minimumImpl: minimumImplCPU,\n multiplyImpl: multiplyImplCPU,\n negImpl: negImplCPU,\n notEqualImpl: notEqualImplCPU,\n prodImpl: prodImplCPU,\n rangeImpl: rangeImplCPU,\n rsqrtImpl: rsqrtImplCPU,\n simpleAbsImpl: simpleAbsImplCPU,\n sliceImpl: sliceImplCPU,\n stridedSliceImpl: stridedSliceImplCPU,\n stringNGramsImpl: stringNGramsImplCPU,\n subImpl: subImplCPU,\n tileImpl: tileImplCPU,\n topKImpl: topKImplCPU,\n transposeImpl: transposeImplCPU,\n uniqueImpl: uniqueImplCPU,\n} = shared;\n\nexport {\n addImplCPU,\n ceilImplCPU,\n concatImplCPU,\n equalImplCPU,\n expImplCPU,\n expm1ImplCPU,\n floorImplCPU,\n gatherNdImplCPU,\n gatherV2ImplCPU,\n greaterEqualImplCPU,\n greaterImplCPU,\n lessEqualImplCPU,\n lessImplCPU,\n logImplCPU,\n maxImplCPU,\n maximumImplCPU,\n minimumImplCPU,\n multiplyImplCPU,\n prodImplCPU,\n negImplCPU,\n notEqualImplCPU,\n simpleAbsImplCPU,\n sliceImplCPU,\n stridedSliceImplCPU,\n stringNGramsImplCPU,\n subImplCPU,\n rangeImplCPU,\n rsqrtImplCPU,\n tileImplCPU,\n topKImplCPU,\n transposeImplCPU,\n uniqueImplCPU,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Abs, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {simpleAbsImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const abs =\n unaryKernelFunc({opType: UnaryOpType.ABS, cpuKernelImpl: simpleAbsImplCPU});\n\nexport const absConfig: KernelConfig = {\n kernelName: Abs,\n backendName: 'webgpu',\n kernelFunc: abs\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Add, KernelConfig} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {addImplCPU as cpuAdd} from '../kernel_utils/shared';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const addKernelFunc = binaryKernelFunc({\n opSnippet: BinaryOpType.ADD,\n cpuKernelImpl: cpuAdd,\n supportsComplex: true\n});\n\nexport const addConfig: KernelConfig = {\n kernelName: Add,\n backendName: 'webgpu',\n kernelFunc: addKernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class AddNPackedProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames: string[];\n workPerThread = 4;\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n\n constructor(shapes: number[][]) {\n this.outputShape = shapes[0];\n this.variableNames = shapes.map((_, i) => `T${i}`);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n this.shaderKey = 'addN';\n }\n\n getUserCode(): string {\n const snippets: string[] = [];\n // Get target elements from every input tensor.\n this.variableNames.forEach(variable => {\n snippets.push(\n `let v${variable} = get${variable}ByOutputCoords(coords);`);\n });\n // Calculate the sum of all elements.\n const operation = this.variableNames\n .map(variable => {\n return `v${variable}`;\n })\n .join(' + ');\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n for (var i = 0; i < ${this.workPerThread}; i = i + 1) {\n let flatIndex = index * ${this.workPerThread} + i;\n if (flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndex);\n ${snippets.join('\\n ')}\n setOutputAtIndex(flatIndex, ${operation});\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AddN, AddNInputs, KernelConfig, KernelFunc, TensorInfo, upcastType} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {AddNPackedProgram} from '../addn_packed_webgpu';\nimport {identity} from './Identity';\n\nexport function addN(args: {inputs: AddNInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n\n const tensors = inputs;\n if (tensors.length === 1) {\n return identity({inputs: {x: tensors[0]}, backend});\n }\n\n const dtype =\n tensors.map(t => t.dtype).reduce((d1, d2) => upcastType(d1, d2));\n const shapes = tensors.map(t => t.shape);\n const program = new AddNPackedProgram(shapes);\n return backend.runWebGPUProgram(program, tensors, dtype);\n}\n\nexport const addNConfig: KernelConfig = {\n kernelName: AddN,\n backendName: 'webgpu',\n kernelFunc: addN as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getCoordsXYZ, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ArgMinMaxProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n variableNames = ['x'];\n uniforms = 'infinityValue : f32,';\n inputShape: number[];\n reductionFactor: number;\n op: string;\n size = true;\n\n constructor(inputShape: number[], axis: number, reduceType: 'min'|'max') {\n const axes = [axis];\n backend_util.assertAxesAreInnerMostDims(\n 'arg' + reduceType.charAt(0).toUpperCase() + reduceType.slice(1), axes,\n inputShape.length);\n\n this.op = reduceType === 'min' ? '<' : '>';\n\n // |outShape| is the shape with the removed axis\n const [outputShape] =\n backend_util.computeOutAndReduceShapes(inputShape, axes);\n\n this.outputShape = outputShape.length === 0 ? 
[1] : outputShape;\n\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n // A work group only outputs a data, so we transfer [1, 1, 1] to compute\n // dispatch size.\n this.dispatch =\n computeDispatch(this.dispatchLayout, this.outputShape, [1, 1, 1]);\n\n this.inputShape = inputShape;\n this.shaderKey = `argMinMax${this.op}`;\n }\n\n getUserCode(): string {\n const sharedMemorySnippet = `\n var xBestIndices : array;\n var xBestValues : array;\n `;\n\n const getInputShapeLastDim = () => {\n if (this.inputShape.length === 1) {\n return 'uniforms.xShape';\n } else {\n return `uniforms.xShape.${getCoordsXYZ(this.inputShape.length - 1)}`;\n }\n };\n\n const splitOutputCoords = () => {\n let snippet = '';\n if (this.outputShape.length === 1) {\n if (this.inputShape.length !== 1) {\n snippet += 'outputCoords,';\n }\n } else {\n for (let i = 0; i < this.outputShape.length; i++) {\n snippet += `outputCoords.${getCoordsXYZ(i)},`;\n }\n }\n return snippet;\n };\n\n const userCode = `\n fn DIV_CEIL(a : u32, b : u32) -> u32 {\n return ((a - 1u) / b + 1u);\n }\n\n ${sharedMemorySnippet}\n\n ${getMainHeaderAndGlobalIndexString()}\n let outputIndex = index / i32(workGroupSizeX);\n let reduceLength = ${getInputShapeLastDim()};\n\n var bestIndex = i32(localId.x);\n var bestValue = uniforms.infinityValue;\n let outputCoords = getCoordsFromIndex(outputIndex);\n for (var k = i32(localId.x); k < reduceLength && outputIndex < uniforms.size;\n k = k + i32(workGroupSizeX)) {\n let candidate = getX(${splitOutputCoords()} k);\n if (!isnan(candidate) && candidate ${this.op} bestValue) {\n bestValue = candidate;\n bestIndex = k;\n }\n }\n xBestValues[localId.x] = bestValue;\n xBestIndices[localId.x] = bestIndex;\n workgroupBarrier();\n\n var reduceSize = min(u32(reduceLength), workGroupSizeX);\n for (var currentSize = reduceSize / 2u; reduceSize > 1u;\n currentSize = reduceSize / 2u) {\n let interval = DIV_CEIL(reduceSize, 2u);\n if (localId.x < currentSize) {\n let candidate = xBestValues[localId.x + interval];\n if (candidate ${this.op} bestValue) {\n bestValue = candidate;\n xBestValues[localId.x] = bestValue;\n xBestIndices[localId.x] = xBestIndices[localId.x + interval];\n }\n }\n reduceSize = interval;\n workgroupBarrier();\n }\n\n if (localId.x == 0u && outputIndex < uniforms.size) {\n setOutputAtIndexI32(outputIndex, xBestIndices[localId.x]);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getWorkGroupSizeString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch} from './webgpu_util';\n\nexport class TransposeSharedProgram implements WebGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[]};\n dispatch: [number, number, number];\n // Note that the maximum number of workgroup invocations by webgpu is 256.\n workGroupSize: [number, number, number] = [16, 16, 1];\n\n constructor(aShape: number[], newDim: number[]) {\n const outputShape: number[] = new Array(aShape.length);\n for (let i = 0; i < outputShape.length; i++) {\n outputShape[i] = aShape[newDim[i]];\n }\n this.outputShape = outputShape;\n this.dispatchLayout = {x: [0], y: [1]};\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize, [1, 1, 1]);\n\n this.shaderKey = 'transposeShared';\n }\n\n getUserCode(): string {\n const userCode = `\n let TILE_DIM = ${this.workGroupSize[0]};\n var tile : array, ${\n this.workGroupSize[0]}>;\n ${getWorkGroupSizeString()}\n fn main(@builtin(local_invocation_id) localId : vec3,\n @builtin(workgroup_id) workgroupId : vec3) {\n var x = i32(workgroupId.x) * TILE_DIM + i32(localId.x);\n var y = i32(workgroupId.y) * TILE_DIM + i32(localId.y);\n let width = uniforms.outShape[0];\n let height = uniforms.outShape[1];\n if (x < width && y < height) {\n tile[localId.y][localId.x] = A[y * width + x];\n }\n workgroupBarrier();\n\n x = i32(workgroupId.y) * TILE_DIM + i32(localId.x);\n y = i32(workgroupId.x) * TILE_DIM + i32(localId.y);\n if (x < height && y < width) {\n setOutputAtIndex((y * height + x), tile[localId.x]\n [localId.y]);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getCoordsXYZ, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class TransposeProgram implements WebGPUProgram {\n variableNames = ['A'];\n shaderKey: string;\n outputShape: number[];\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workPerThread = 4;\n workGroupSize: [number, number, number] = [64, 1, 1];\n newDim: number[];\n size = true;\n\n constructor(aShape: number[], newDim: number[]) {\n const outputShape: number[] = new Array(aShape.length);\n for (let i = 0; i < outputShape.length; i++) {\n outputShape[i] = aShape[newDim[i]];\n }\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n\n this.newDim = newDim;\n this.shaderKey = `transpose_${newDim}`;\n }\n\n getUserCode(): string {\n const dtype = getCoordsDataType(this.outputShape.length);\n const switched = getSwitchedCoords(this.newDim);\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n\n for(var i = 0; i < ${this.workPerThread}; i = i + 1) {\n let flatIndex = index * ${this.workPerThread} + i;\n if(flatIndex < uniforms.size) {\n let resRC = getCoordsFromIndex(flatIndex);\n setOutputAtIndex(flatIndex, A[getIndexFromCoords${\n this.outputShape.length}D(\n ${dtype}(${switched}), uniforms.aShape)]);\n }\n }\n }\n `;\n return userCode;\n }\n}\n\nfunction getSwitchedCoords(newDim: number[]): string {\n const rank = newDim.length;\n if (rank > 6) {\n throw Error(`Transpose for rank ${rank} is not yet supported`);\n }\n const switchedCoords = new Array(rank);\n for (let i = 0; i < newDim.length; i++) {\n switchedCoords[newDim[i]] = `resRC.${getCoordsXYZ(i)}`;\n }\n\n return switchedCoords.join();\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Transpose, TransposeAttrs, TransposeInputs, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {transposeImplCPU as cpuTranspose} from '../kernel_utils/shared';\n\nimport {TransposeSharedProgram} from '../transpose_shared_webgpu';\nimport {TransposeProgram} from '../transpose_webgpu';\n\nexport function transpose(args: {\n inputs: TransposeInputs,\n attrs: TransposeAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {perm} = attrs;\n const webgpuBackend = backend;\n\n const xRank = x.shape.length;\n const newShape: number[] = new Array(xRank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = x.shape[perm[i]];\n }\n if (backend.shouldExecuteOnCPU([x])) {\n const xData = webgpuBackend.tensorMap.get(x.dataId);\n const values = xData.values as TypedArray;\n const outValues = cpuTranspose(values, x.shape, x.dtype, perm, newShape);\n return backend.makeTensorInfo(newShape, x.dtype, outValues);\n }\n if (x.shape.length === 2 && util.arraysEqual(perm, [1, 0])) {\n const program = new TransposeSharedProgram(x.shape, perm);\n return webgpuBackend.runWebGPUProgram(program, [x], x.dtype);\n }\n const program = new TransposeProgram(x.shape, perm);\n return webgpuBackend.runWebGPUProgram(program, [x], x.dtype);\n}\n\nexport const transposeConfig: KernelConfig = {\n kernelName: Transpose,\n backendName: 'webgpu',\n kernelFunc: transpose as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMax, ArgMaxAttrs, ArgMaxInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {ArgMinMaxProgram} from '../argminmax_webgpu';\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {transpose} from './Transpose';\n\nexport function argMax(\n args: {inputs: ArgMaxInputs, backend: WebGPUBackend, attrs: ArgMaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis} = attrs;\n\n let axes = util.parseAxisParam(axis, x.shape);\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n const intermediateTensorInfos = [];\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n intermediateTensorInfos.push($x);\n axes = backend_util.getInnerMostAxes(axes.length, $x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('argMax', [axes[0]], $x.shape.length);\n const program = new ArgMinMaxProgram($x.shape, axes[0], 'max');\n const uniformData = [{type: 'float32', data: [Number.NEGATIVE_INFINITY]}];\n const out = backend.runWebGPUProgram(program, [$x], 'int32', uniformData);\n intermediateTensorInfos.forEach(t => backend.disposeData(t.dataId));\n return out;\n}\n\nexport const argMaxConfig: KernelConfig = {\n kernelName: ArgMax,\n backendName: 'webgpu',\n kernelFunc: argMax as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMin, ArgMinAttrs, ArgMinInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {ArgMinMaxProgram} from '../argminmax_webgpu';\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {transpose} from './Transpose';\n\nexport function argMin(\n args: {inputs: ArgMinInputs, backend: WebGPUBackend, attrs: ArgMinAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis} = attrs;\n\n let axes = util.parseAxisParam(axis, x.shape);\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n const intermediateTensorInfos = [];\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n intermediateTensorInfos.push($x);\n axes = backend_util.getInnerMostAxes(axes.length, $x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('argMin', [axes[0]], $x.shape.length);\n const program = new ArgMinMaxProgram($x.shape, axes[0], 'min');\n const uniformData = [{type: 'float32', data: [Number.POSITIVE_INFINITY]}];\n const out = backend.runWebGPUProgram(program, [$x], 'int32', uniformData);\n intermediateTensorInfos.forEach(t => backend.disposeData(t.dataId));\n return out;\n}\n\nexport const argMinConfig: KernelConfig = {\n kernelName: ArgMin,\n backendName: 'webgpu',\n kernelFunc: argMin as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class Pool2DProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n uniforms =\n `stride : vec2, pad : vec2, dilation : vec2, convDims : vec2, filterDims : vec2,`;\n // TODO(jiajia.qin@intel.com): Dynamically choose different workGroupSize for\n // different output shapes.\n workGroupSize: [number, number, number] = [128, 1, 1];\n poolType: 'max'|'avg';\n size = true;\n\n constructor(convInfo: backend_util.Conv2DInfo, poolType: 'max'|'avg') {\n this.outputShape = convInfo.outShape;\n\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.shaderKey = `pool2D_${poolType}`;\n this.poolType = poolType;\n }\n\n getUserCode(): string {\n let updateSnippet = `resultValue = max(value, resultValue);`;\n if (this.poolType === 'avg') {\n updateSnippet = `resultValue = resultValue + value; count = count + 1.0;`;\n }\n\n let returnValue = `resultValue`;\n if (this.poolType === 'avg') {\n returnValue = `resultValue / count`;\n }\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let batch = coords[0];\n let xRCCorner = vec2(coords.yz) * uniforms.stride - uniforms.pad;\n let xRCorner = xRCCorner.x;\n let xCCorner = xRCCorner.y;\n\n var resultValue = ${\n this.poolType === 'avg' ? '0.0' : '-1.0 / pow(10.0, -20.0)'};\n var count = 0.0;\n\n for (var wR = 0; wR < uniforms.filterDims.x; wR = wR + uniforms.dilation.x) {\n let xR = xRCorner + wR;\n\n if (xR < 0 || xR >= uniforms.convDims.x) {\n continue;\n }\n\n for (var wC = 0; wC < uniforms.filterDims.y; wC = wC + uniforms.dilation.y) {\n let xC = xCCorner + wC;\n if (xC < 0 || xC >= uniforms.convDims.y) {\n continue;\n }\n\n let value = getX(batch, xR, xC, coords[3]);\n ${updateSnippet}\n }\n }\n\n setOutputAtIndex(index, ${returnValue});\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class PoolWithFilterSizeEqualsOneProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n uniforms = `stride : vec2,`;\n workGroupSize: [number, number, number] = [256, 1, 1];\n size = true;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.outShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.shaderKey = 'poolWithFilterSizeEqualsOne';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let batch = coords[0];\n let d = coords[3];\n\n let xRCCorner = coords.yz * uniforms.stride;\n let xRCorner = xRCCorner.x;\n let xCCorner = xRCCorner.y;\n\n let value = getX(batch, xRCorner, xCCorner, d);\n setOutputAtIndex(index, value);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPool, AvgPoolAttrs, AvgPoolInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {identity} from './Identity';\nimport {Pool2DProgram} from '../pool2d_webgpu';\nimport {PoolWithFilterSizeEqualsOneProgram} from '../pool_filtersizeone_webgpu';\n\nexport function avgPool(\n args: {inputs: AvgPoolInputs, backend: WebGPUBackend, attrs: AvgPoolAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations = 1;\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&\n util.arraysEqual(convInfo.inShape, convInfo.outShape)) {\n return identity({inputs: {x}, backend});\n }\n\n let program: Pool2DProgram|PoolWithFilterSizeEqualsOneProgram;\n const dimensions =\n [{type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]}];\n if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1) {\n program = new PoolWithFilterSizeEqualsOneProgram(convInfo);\n } else {\n program = new Pool2DProgram(convInfo, 'avg');\n dimensions.push(\n {type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left]}, {\n type: 'int32',\n data: [convInfo.dilationHeight, convInfo.dilationWidth]\n },\n {type: 'int32', data: [convInfo.inHeight, convInfo.inWidth]}, {\n type: 'int32',\n data: [convInfo.effectiveFilterHeight, convInfo.effectiveFilterWidth]\n });\n }\n\n return backend.runWebGPUProgram(program, [x], x.dtype, dimensions);\n}\n\nexport const avgPoolConfig: KernelConfig = {\n kernelName: AvgPool,\n backendName: 'webgpu',\n kernelFunc: avgPool as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BatchMatMul, BatchMatMulAttrs, BatchMatMulInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {batchMatMulImpl} from './BatchMatMul_impl';\n\nexport function batchMatMul(args: {\n inputs: BatchMatMulInputs,\n attrs: BatchMatMulAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {a, b} = inputs;\n const {transposeA, transposeB} = attrs;\n\n return batchMatMulImpl({a, b, transposeA, transposeB, backend});\n}\n\nexport const batchMatMulConfig: KernelConfig = {\n kernelName: BatchMatMul,\n backendName: 'webgpu',\n kernelFunc: batchMatMul as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class SliceProgram implements WebGPUProgram {\n variableNames = ['source'];\n uniforms: string;\n outputShape: number[];\n shaderKey: string;\n rank: number;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workPerThread = 1;\n workGroupSize: [number, number, number] = [64, 1, 1];\n start: number[];\n size = true;\n\n constructor(start: number[], destSize: number[]) {\n this.outputShape = destSize;\n this.rank = destSize.length;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n\n this.start = start;\n this.uniforms = `start : ${getCoordsDataType(start.length)}, `;\n this.shaderKey = 'slice';\n }\n\n getUserCode(): string {\n const dtype = getCoordsDataType(this.rank);\n const sourceCoords = getCoords(this.rank);\n let coordSum;\n if (this.start.length === 1) {\n coordSum = this.outputShape.map((_, i) => {\n return `sourceLoc = uniforms.start + coords;`;\n });\n } else {\n coordSum = this.outputShape.map((_, i) => {\n return `sourceLoc.${coords[i]} = uniforms.start[${i}] + coords.${\n coords[i]};`;\n });\n }\n\n const userCode = `\n 
${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n var sourceLoc : ${dtype};\n let coords = getCoordsFromIndex(index);\n ${coordSum.join('\\n')}\n setOutputAtIndex(index, getSource(${sourceCoords}));\n }\n }\n `;\n return userCode;\n }\n}\n\nconst coords = ['x', 'y', 'z', 'w', 'u', 'v'];\n\nfunction getCoords(rank: number): string {\n if (rank === 1) {\n return 'sourceLoc';\n } else if (rank <= 6) {\n return coords.slice(0, rank).map(coord => `sourceLoc.${coord}`).join(',');\n } else {\n throw Error(`Slicing for rank ${rank} is not yet supported`);\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Slice, slice_util, SliceAttrs, SliceInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {sliceImplCPU} from '../kernel_utils/shared';\nimport {SliceProgram} from '../slice_webgpu';\n\nexport function slice(\n args: {inputs: SliceInputs, backend: WebGPUBackend, attrs: SliceAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {begin, size} = attrs;\n\n const [$begin, $size] = slice_util.parseSliceParams(x, begin, size);\n slice_util.assertParamsValid(x, $begin, $size);\n\n if (backend.shouldExecuteOnCPU([x]) || x.dtype === 'string') {\n const xBufferInfo = backend.tensorMap.get(x.dataId);\n const outValues = sliceImplCPU(\n xBufferInfo.values as TypedArray, $begin, $size, x.shape, x.dtype);\n return backend.makeTensorInfo($size, x.dtype, outValues);\n }\n\n if (util.sizeFromShape($size) === 0) {\n return backend.makeTensorInfo($size, x.dtype, []);\n }\n\n // TODO(xing.xu): Add shadow slice support.\n const program = new SliceProgram($begin, $size);\n const uniformData = [{type: 'int32', data: $begin}];\n return backend.runWebGPUProgram(program, [x], x.dtype, uniformData);\n}\n\nexport const sliceConfig: KernelConfig = {\n kernelName: Slice,\n backendName: 'webgpu',\n kernelFunc: slice as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BatchToSpaceND, BatchToSpaceNDAttrs, BatchToSpaceNDInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\nimport {transpose} from './Transpose';\n\nexport const batchToSpaceND = (args: {\n inputs: BatchToSpaceNDInputs,\n backend: WebGPUBackend,\n attrs: BatchToSpaceNDAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockShape, crops} = attrs;\n\n util.assert(\n x.shape.length <= 4,\n () => 'batchToSpaceND for rank > 4 with a WebGPU backend not ' +\n 'implemented yet');\n const prod = blockShape.reduce((a, b) => a * b);\n\n const reshaped = backend_util.getReshaped(x.shape, blockShape, prod);\n const permuted = backend_util.getPermuted(reshaped.length, blockShape.length);\n const reshapedPermuted =\n backend_util.getReshapedPermuted(x.shape, blockShape, prod);\n const sliceBeginCoords =\n backend_util.getSliceBeginCoords(crops, blockShape.length);\n const sliceSize =\n backend_util.getSliceSize(reshapedPermuted, crops, blockShape.length);\n\n const toDispose = [];\n\n const reshapedIntermediate =\n reshape({inputs: {x}, backend, attrs: {shape: reshaped}});\n const transposedIntermediate = transpose(\n {inputs: {x: reshapedIntermediate}, backend, attrs: {perm: permuted}});\n const reshapedIntermediate2 = reshape({\n inputs: {x: transposedIntermediate},\n backend,\n attrs: {shape: reshapedPermuted}\n });\n const sliced = slice({\n inputs: {x: reshapedIntermediate2},\n backend,\n attrs: {begin: sliceBeginCoords, size: sliceSize}\n });\n\n toDispose.push(reshapedIntermediate);\n toDispose.push(transposedIntermediate);\n toDispose.push(reshapedIntermediate2);\n\n toDispose.forEach(t => backend.disposeData(t.dataId));\n\n return sliced;\n};\n\nexport const batchToSpaceNDConfig: KernelConfig = {\n kernelName: BatchToSpaceND,\n backendName: 'webgpu',\n kernelFunc: batchToSpaceND as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, NotEqual} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {notEqualImplCPU as cpuNotEqual} from '../kernel_utils/shared';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const notEqual = binaryKernelFunc({\n opSnippet: BinaryOpType.NOT_EQUAL,\n dtype: 'bool',\n cpuKernelImpl: cpuNotEqual\n});\n\nexport const notEqualConfig: KernelConfig = {\n kernelName: NotEqual,\n backendName: 'webgpu',\n kernelFunc: notEqual\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Real, RealInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {identity} from './Identity';\n\nexport function real(args: {inputs: RealInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n const inputData = backend.tensorMap.get(input.dataId);\n\n return identity({inputs: {x: inputData.complexTensorInfos.real}, backend});\n}\n\nexport const realConfig: KernelConfig = {\n kernelName: Real,\n backendName: 'webgpu',\n kernelFunc: real as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo} from '@tensorflow/tfjs-core';\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {UnaryOpProgram} from '../unary_op_webgpu';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport function int(input: TensorInfo, backend: WebGPUBackend): TensorInfo {\n const program = new UnaryOpProgram(input.shape, UnaryOpType.TO_INT);\n const output = backend.runWebGPUProgram(program, [input], 'int32');\n return {dataId: output.dataId, shape: output.shape, dtype: output.dtype};\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as tf from '@tensorflow/tfjs-core';\nimport {BinaryInputs, Cast, CastAttrs, CastInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {complex} from './Complex';\nimport {identity} from './Identity';\nimport {notEqual} from './NotEqual';\nimport {real} from './Real';\n\nimport {int} from '../kernel_utils/int';\n\nexport function cast(\n args: {inputs: CastInputs, backend: WebGPUBackend, attrs: CastAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {dtype} = attrs;\n\n // Casting to complex64.\n if (dtype === 'complex64') {\n if (x.dtype === 'complex64') {\n return identity({inputs: {x}, backend});\n }\n\n // TODO: Import kernel function once zeros is modularized.\n const zerosTensor = tf.zeros(x.shape);\n const floatX = cast({inputs: {x}, backend, attrs: {dtype: 'float32'}});\n\n const result =\n complex({inputs: {real: floatX, imag: zerosTensor}, backend});\n\n zerosTensor.dispose();\n backend.disposeData(floatX.dataId);\n\n return result;\n }\n\n // Casting from complex64\n if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const result = cast({inputs: {x: realPart}, backend, attrs: {dtype}});\n backend.disposeData(realPart.dataId);\n return result;\n }\n\n if (!util.hasEncodingLoss(x.dtype, dtype)) {\n // We don't change the underlying data, since we cast to higher\n // precision.\n const result = identity({inputs: {x}, backend});\n return {dataId: result.dataId, shape: result.shape, dtype};\n }\n\n if (dtype === 'int32') {\n return int(x, 
backend);\n }\n\n if (dtype === 'bool') {\n const zerosTensorInfo = backend.makeTensorInfo(\n [], 'bool', util.getTypedArrayFromDType('bool', 1));\n\n const binaryInputs: BinaryInputs = {a: x, b: zerosTensorInfo};\n\n const result = notEqual({inputs: binaryInputs, backend}) as TensorInfo;\n backend.disposeData(zerosTensorInfo.dataId);\n return result;\n }\n\n throw new Error(`Error in Cast: failed to cast ${x.dtype} to ${dtype}`);\n}\n\nexport const castConfig: KernelConfig = {\n kernelName: Cast,\n backendName: 'webgpu',\n kernelFunc: cast as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Ceil, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {ceilImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const ceil =\n unaryKernelFunc({opType: UnaryOpType.CEIL, cpuKernelImpl: ceilImplCPU});\n\nexport const ceilConfig: KernelConfig = {\n kernelName: Ceil,\n backendName: 'webgpu',\n kernelFunc: ceil\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ClipVec4Program implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n variableNames = ['A'];\n uniforms = 'minVal : f32, maxVal : f32,';\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workPerThread = 4;\n workGroupSize: [number, number, number] = [64, 1, 1];\n isVec4 = true;\n size = true;\n\n constructor(outputShape: number[]) {\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n this.shaderKey = 'clipVec4';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if(index < uniforms.size) {\n let value = getAByOutputIndex(index);\n var clampedValue : vec4;\n for (var i = 0; i < 4; i = i + 1) {\n if (isnan(value[i])) {\n clampedValue[i] = value[i];\n } else {\n clampedValue[i] = clamp(value[i], uniforms.minVal, uniforms.maxVal);\n }\n }\n\n setOutputAtIndex(index, clampedValue);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ClipProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n variableNames = ['A'];\n uniforms = 'minVal : f32, maxVal : f32,';\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n minVal: number;\n maxVal: number;\n size = true;\n\n constructor(outputShape: number[]) {\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.shaderKey = 'clip';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if(index < uniforms.size) {\n let value = getAByOutputIndex(index);\n if (isnan(value)) {\n setOutputAtIndex(index, value);\n return;\n }\n setOutputAtIndex(index, clamp(value, uniforms.minVal, uniforms.maxVal));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ClipByValue, ClipByValueAttrs, ClipByValueInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {ClipVec4Program} from '../clip_vec4_webgpu';\nimport {ClipProgram} from '../clip_webgpu';\n\nexport function clipByValue(args: {\n inputs: ClipByValueInputs,\n backend: WebGPUBackend,\n attrs: ClipByValueAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {clipValueMin, clipValueMax} = attrs;\n\n let program: ClipProgram|ClipVec4Program;\n const uniformData = [\n {type: 'float32', data: [clipValueMin]},\n {type: 'float32', data: [clipValueMax]}\n ];\n if (util.sizeFromShape(x.shape) % 4 === 0) {\n program = new ClipVec4Program(x.shape);\n } else {\n program = new ClipProgram(x.shape);\n }\n return backend.runWebGPUProgram(program, [x], x.dtype, uniformData);\n}\n\nexport const clipByValueConfig: KernelConfig = {\n kernelName: ClipByValue,\n backendName: 'webgpu',\n kernelFunc: clipByValue as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ConcatProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames: string[];\n uniforms = '';\n workPerThread = 4;\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n offsetLength: number;\n\n constructor(shapes: Array<[number, number]>) {\n this.outputShape =\n backend_util.computeOutShape(shapes, 1 /* axis */) as [number, number];\n this.variableNames = shapes.map((_, i) => `T${i}`);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n\n this.offsetLength = shapes.length - 1;\n for (let i = 0; i < this.offsetLength; i++) {\n this.uniforms += `offset${i} : i32,`;\n }\n this.shaderKey = 'concat';\n }\n\n getUserCode(): string {\n const snippets: string[] = [];\n if (this.offsetLength > 0) {\n snippets.push(\n `if (yC < uniforms.offset0){ setOutputAtCoords(coords.x, coords.y, getT0(yR, yC)); }`);\n for (let i = 1; i < this.offsetLength; i++) {\n snippets.push(\n `else if (yC < uniforms.offset${[i]}){ ` +\n `setOutputAtCoords(coords.x, coords.y, getT${\n i}(yR, yC - uniforms.offset${i - 1})); }`);\n }\n const lastIndex = this.offsetLength;\n const lastShiftIndex = this.offsetLength - 1;\n snippets.push(`else { setOutputAtCoords(coords.x, coords.y, getT${\n lastIndex}(yR, yC - uniforms.offset${lastShiftIndex})); }`);\n } else {\n snippets.push(`setOutputAtCoords(coords.x, coords.y, getT0(yR, yC));`);\n }\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n for(var i = 0; i < ${this.workPerThread}; i = i + 1) {\n let flatIndex = index * ${this.workPerThread} + i;\n if(flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndex);\n let yR = coords.x;\n let yC = coords.y;\n\n ${snippets.join('\\n ')}\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Imag, ImagInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {identity} from './Identity';\n\nexport function imag(args: {inputs: ImagInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n const inputData = backend.tensorMap.get(input.dataId);\n\n return identity({inputs: {x: inputData.complexTensorInfos.imag}, backend});\n}\n\nexport const imagConfig: KernelConfig = {\n kernelName: Imag,\n backendName: 'webgpu',\n kernelFunc: imag as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, ConcatInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {concatImplCPU} from '../kernel_utils/shared';\n\nimport {complex} from './Complex';\nimport {ConcatProgram} from '../concat_webgpu';\nimport {imag} from './Imag';\nimport {real} from './Real';\nimport {reshape} from './Reshape';\n\nexport function concatImpl(\n inputs: ConcatInputs, axis: number, backend: WebGPUBackend): TensorInfo {\n const dtype = inputs[0].dtype;\n if (dtype === 'complex64') {\n const reals = inputs.map((t) => real({inputs: {input: t}, backend}));\n const imags = inputs.map((t) => imag({inputs: {input: t}, backend}));\n\n const realConcated = concatImpl(reals, axis, backend);\n const imagConcated = concatImpl(imags, axis, backend);\n\n const result =\n complex({inputs: {real: realConcated, imag: imagConcated}, backend});\n\n reals.forEach(r => backend.disposeData(r.dataId));\n imags.forEach(i => backend.disposeData(i.dataId));\n backend.disposeData(realConcated.dataId);\n backend.disposeData(imagConcated.dataId);\n\n return result;\n }\n\n let runOnCpu = backend.shouldExecuteOnCPU(inputs);\n\n // Run on cpu if dtype is string. For string, the backend represents it\n // as Uint8Array[], where each Uint8Array is a character. Given that the\n // computation is only on the outer array, uploading the whole data onto\n // gpu is wasteful. Also, currently webgpu doesn't have a design to\n // upload and retrieve Uint8Array[] between cpu and gpu. 
Therefore, we\n // just run the kernel on cpu if dtype is string.\n if (dtype === 'string') {\n runOnCpu = true;\n }\n\n if (runOnCpu) {\n // Any concat of n-dimensional tensors across any axis can be reduced to\n // a concatenation of two-dimensional tensors across the axis 1 by first\n // partitioning the axes of the original tensors into those less than the\n // axis to be concatenated and the rest. Then reshape the tensors\n // into a two-dimensional tensor by collapsing these two sets of axes and\n // concatenate the resulting matrices across the axis 1, finally reshaping\n // the result to have the proper shape.\n const tensors2D = inputs.map(t => {\n const innerSize = util.sizeFromShape(t.shape.slice(axis));\n const shape = [-1, innerSize];\n return reshape({inputs: {x: t}, backend, attrs: {shape}});\n });\n\n const inputsValShapes = tensors2D.map(t => {\n return {vals: backend.readSync(t.dataId), shape: t.shape};\n });\n\n // Concats 2d tensors along axis=1.\n const outShape =\n backend_util.computeOutShape(tensors2D.map(t => t.shape), 1 /* axis */);\n const simplyConcat = tensors2D[0].shape[0] === 1;\n const outVals =\n concatImplCPU(inputsValShapes, outShape, dtype, simplyConcat);\n\n const finalOutShape =\n backend_util.computeOutShape(inputs.map(t => t.shape), axis);\n\n const outInfo = backend.makeTensorInfo(finalOutShape, dtype, outVals);\n\n tensors2D.forEach(t => backend.disposeData(t.dataId));\n\n return outInfo;\n }\n\n const {tensors2D, outShape} = computeTensors2D(inputs, axis, backend);\n const shapes = (tensors2D).map(t => t.shape as [number, number]);\n const program = new ConcatProgram(shapes);\n\n const uniformData: Array<{type: string; data: number[]}> = [];\n const offsets: number[] = new Array(shapes.length - 1);\n if (offsets.length > 0) {\n offsets[0] = shapes[0][1];\n uniformData.push({type: 'int32', data: [offsets[0]]});\n for (let i = 1; i < offsets.length; i++) {\n offsets[i] = offsets[i - 1] + shapes[i][1];\n uniformData.push({type: 'int32', data: [offsets[i]]});\n }\n }\n\n const res = backend.runWebGPUProgram(\n program, tensors2D, tensors2D[0].dtype, uniformData);\n tensors2D.forEach(r => backend.disposeData(r.dataId));\n\n const reshapedResult =\n reshape({inputs: {x: res}, backend, attrs: {shape: outShape}});\n backend.disposeData(res.dataId);\n return reshapedResult;\n}\n\nfunction computeTensors2D(\n inputs: ConcatInputs, axis: number, backend: WebGPUBackend) {\n const outShape = backend_util.computeOutShape(inputs.map(t => t.shape), axis);\n const tensors2D = inputs.map(t => reshape({\n inputs: {x: t},\n backend,\n attrs: {\n shape: [\n util.sizeFromShape(t.shape.slice(0, axis)),\n util.sizeFromShape(t.shape.slice(axis))\n ]\n }\n }));\n\n return {tensors2D, outShape};\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Concat, ConcatAttrs, ConcatInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {concatImpl} from './Concat_impl';\nimport {identity} from './Identity';\n\nexport function concat(\n args: {inputs: ConcatInputs, attrs: ConcatAttrs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {axis} = attrs;\n\n const $axis = util.parseAxisParam(axis, inputs[0].shape)[0];\n const outShape =\n backend_util.computeOutShape(inputs.map(t => t.shape), $axis);\n if (util.sizeFromShape(outShape) === 0) {\n return backend.makeTensorInfo(outShape, inputs[0].dtype, []);\n }\n\n // Keep only non-empty tensors (ignore tensors with 0 in their shape).\n const $inputs = inputs.filter(t => util.sizeFromShape(t.shape) > 0);\n if ($inputs.length === 1) {\n return identity({inputs: {x: $inputs[0]}, backend});\n }\n\n const shapes = $inputs.map(t => t.shape);\n backend_util.assertParamsConsistent(shapes, $axis);\n\n return concatImpl($inputs, $axis, backend);\n}\n\nexport const concatConfig: KernelConfig = {\n kernelName: Concat,\n backendName: 'webgpu',\n kernelFunc: concat as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {makeMatMulPackedVec4Source} from './matmul_packed_vec4_webgpu';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, tilesFitEvenlyIntoShape} from './webgpu_util';\n\nexport class Conv2DMMVec4Program implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'W'];\n uniforms =\n `filterDims : vec2, pad : vec2, stride : vec2, dilation : vec2,\n dimAOuter : i32, dimBOuter : i32, dimInner : i32,`;\n workGroupSize: [number, number, number] = [8, 8, 1];\n elementsPerThread: [number, number, number];\n isVec4 = true;\n convInfo: backend_util.Conv2DInfo;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n hasLeakyreluAlpha: boolean;\n tileAOuter: number;\n tileBOuter: number;\n tileInner: number;\n fitA: boolean;\n fitB: boolean;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: backend_util.Activation = null,\n hasPreluActivationWeights = false, hasLeakyreluAlpha = false) {\n this.outputShape = convInfo.outShape;\n\n util.assert(\n convInfo.dataFormat === 'channelsLast',\n () => 'TODO: NCHW is unimplemented');\n this.dispatchLayout = {x: [3], y: [1, 2], z: [0]};\n // The first element in elementsPerThread must be 4.\n if (this.outputShape[1] === 1) {\n this.elementsPerThread = [4, 1, 1];\n } else {\n this.elementsPerThread = [4, 4, 1];\n }\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n this.elementsPerThread);\n this.convInfo = convInfo;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n this.hasLeakyreluAlpha = hasLeakyreluAlpha;\n if (this.addBias) {\n this.variableNames.push('bias');\n }\n\n if (this.hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n if (this.hasLeakyreluAlpha) {\n this.variableNames.push('leakyreluAlpha');\n }\n\n this.tileAOuter = this.outputShape[1] === 1 ?\n 1 :\n this.workGroupSize[1] * this.elementsPerThread[1];\n this.tileBOuter = this.workGroupSize[0] * this.elementsPerThread[0];\n this.tileInner = this.tileBOuter;\n [this.fitA, this.fitB] = this.getShapeFit();\n this.shaderKey = `conv2DMMVec4_${this.activation}_${this.fitA}_${\n this.fitB}_${this.elementsPerThread}`;\n }\n\n getShapeFit(): boolean[] {\n const tileSizeA = [this.tileAOuter, this.tileInner];\n const tileSizeB = [this.tileInner, this.tileBOuter];\n const dimAOuter = this.outputShape[1] * this.outputShape[2];\n const dimBOuter = this.outputShape[3];\n const dimInner = this.convInfo.filterHeight * 
this.convInfo.filterWidth *\n this.convInfo.inChannels;\n return [\n tilesFitEvenlyIntoShape(tileSizeA, [dimAOuter, dimInner]),\n tilesFitEvenlyIntoShape(tileSizeB, [dimInner, dimBOuter])\n ];\n }\n\n // index is used to avoid repeated definition error.\n getSampleAWithRemainder(index: number): string {\n return `let flatIndex${\n index} = getIndexFromCoords4D(coord, uniforms.xShape);\n let divBy4Remainder${index} = flatIndex${index} % 4;\n let divBy4Index${index} = flatIndex${index} / 4;\n let curData${index} = x[divBy4Index${index}];\n if (divBy4Remainder${index} == 0) {\n temp = curData${index};\n } else {\n // TODO: This could end up being a redundant load with another one in\n // the same shader invocation. Perhaps there's an opportunity for\n // optimization\n let nextData${index} = x[divBy4Index${index} + 1];\n if (divBy4Remainder${index} == 1) {\n temp = vec4(curData${index}.yzw, nextData${index}.x);\n } else if (divBy4Remainder${index} == 2) {\n temp = vec4(curData${index}.zw, nextData${index}.xy);\n } else if (divBy4Remainder${index} == 3) {\n temp = vec4(curData${index}.w, nextData${index}.xyz);\n }\n }\n `;\n }\n\n getUserCode(): string {\n const matMulSource = makeMatMulPackedVec4Source(\n this.elementsPerThread, this.tileAOuter, this.tileBOuter,\n this.tileInner);\n\n const remainder = this.convInfo.inChannels % 4;\n // Below code only applys to valid padding type.\n const remainderSnippet = remainder === 0 ?\n `// The bounds checking is always needed since we use it to pad zero for\n // the 'same' padding type.\n if (coordsInBounds4D(coord, uniforms.xShape)) {\n resData = x[getIndexFromCoords4D(coord, uniforms.xShape) / 4];\n } else {\n resData = vec4(0.0); }` :\n `var temp = vec4(0.0);\n ${this.getSampleAWithRemainder(1)}\n resData = temp;\n if (WCol == (uniforms.filterDims[1] - 1)) {\n coord = vec4(\n coord.x, coord.y + 1, coord.z + 1 - uniforms.filterDims[1], 0);\n ${this.getSampleAWithRemainder(2)}\n if (inChCoord == 0) {\n resData = vec4(resData.xyz, temp.x);\n } else if (inChCoord == 1) {\n resData = vec4(resData.xy, temp.xy);\n } else {\n resData = vec4(resData.x, temp.xyz);\n }\n }\n `;\n\n const readASnippet = `let outRow = r / uniforms.outShape[2];\n let outCol = r % uniforms.outShape[2];\n let WRow = c / (uniforms.filterDims[1] * uniforms.xShape[3]);\n let WCol = c / uniforms.xShape[3] % uniforms.filterDims[1];\n let inChCoord = c % uniforms.xShape[3];\n var coord = vec4(\n batch,\n outRow * uniforms.stride[0] + uniforms.dilation[0] * WRow - uniforms.pad[0],\n outCol * uniforms.stride[1] + uniforms.dilation[1] * WCol - uniforms.pad[1],\n inChCoord);\n var resData = vec4(0.0);\n ${remainderSnippet}\n return resData;`;\n\n const sampleA = this.fitA ?\n `${readASnippet}` :\n `if (r < uniforms.dimAOuter && c < uniforms.dimInner) {\n ${readASnippet}\n }\n return vec4(0.0);\n `;\n\n const sampleB = this.fitB ?\n `return W[row * uniforms.dimBOuter / 4 + col];` :\n `if(coordsInBounds2D(vec2(row, col * 4), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return W[row * uniforms.dimBOuter / 4 + col];\n }\n return vec4(0.0);\n `;\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp =\n mapActivationToShaderProgram(this.activation, this.isVec4);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a : vec4, outCoord : vec4) -> vec4 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else if (this.hasLeakyreluAlpha) {\n activationSnippet = `fn 
activation(outCoord: vec4) -> vec4 {\n let b = getLeakyreluAlphaByOutputCoords(outCoord);\n ${activationOp}\n }`;\n throw new Error('Leakyrelu is not supported.');\n } else {\n activationSnippet = `\n fn activation(a : vec4, outCoord : vec4) -> vec4 {\n ${activationOp}\n }`;\n }\n\n applyActivationSnippet = `value = activation(value, outCoord);`;\n }\n\n const addBiasSnippet =\n this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> vec4 {\n let r = row;\n let c = col * 4;\n var batch = i32(globalId.z);\n ${sampleA}\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> vec4 {\n ${sampleB}\n }\n\n fn mm_write(row : i32, col : i32, valueInput : vec4, globalId : vec3) {\n var batch = i32(globalId.z);\n var value = valueInput;\n if (row < uniforms.dimAOuter && col * 4 < uniforms.dimBOuter)\n {\n let outCoord = vec4(\n batch,\n row / uniforms.outShape[2],\n row % uniforms.outShape[2],\n col * 4);\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(outCoord[0], outCoord[1], outCoord[2], outCoord[3],\n value);\n }\n }\n ${matMulSource}\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {makeMatMulPackedSource} from './matmul_packed_webgpu';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, computeWorkGroupSizeForConv2d, computeWorkPerThreadForConv2d, tilesFitEvenlyIntoShape} from './webgpu_util';\n\nexport class Conv2DMMProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'W'];\n uniforms =\n `filterDims : vec2, pad : vec2, stride : vec2, dilation : vec2, dimAOuter : i32, dimBOuter : i32, dimInner : i32,`;\n workGroupSize: [number, number, number];\n elementsPerThread: [number, number, number];\n convInfo: backend_util.Conv2DInfo;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n fitA: boolean;\n fitB: boolean;\n isChannelsLast: boolean;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: backend_util.Activation = null,\n hasPreluActivationWeights = false) {\n this.outputShape = convInfo.outShape;\n this.isChannelsLast = convInfo.dataFormat === 'channelsLast';\n this.dispatchLayout = this.isChannelsLast ? 
{x: [3], y: [1, 2], z: [0]} :\n {x: [1], y: [2, 3], z: [0]};\n this.workGroupSize =\n computeWorkGroupSizeForConv2d(this.dispatchLayout, this.outputShape);\n this.elementsPerThread =\n computeWorkPerThreadForConv2d(this.dispatchLayout, this.outputShape);\n\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n this.elementsPerThread);\n\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n this.convInfo = convInfo;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n\n [this.fitA, this.fitB] = this.getShapeFit();\n this.shaderKey = `conv2DMM_${this.elementsPerThread}_${this.activation}_${\n this.fitA}_${this.fitB}_${this.isChannelsLast}`;\n }\n\n getShapeFit(): boolean[] {\n const tileAOuter = this.workGroupSize[1] * this.elementsPerThread[1];\n const tileBOuter = this.workGroupSize[0] * this.elementsPerThread[0];\n const tileInner = tileAOuter > tileBOuter ? tileAOuter : tileBOuter;\n util.assert(\n tileInner % this.workGroupSize[0] === 0 &&\n tileInner % this.workGroupSize[1] === 0,\n () =>\n // tslint:disable-next-line: max-line-length\n 'tileInner must be multiple of workgroupsize.x and workgroupsize.y');\n const tileSizeA = [tileAOuter, tileInner];\n const tileSizeB = [tileInner, tileBOuter];\n const dimAOuter = this.convInfo.outHeight * this.convInfo.outWidth;\n const dimBOuter = this.convInfo.outChannels;\n const dimInner = this.convInfo.filterHeight * this.convInfo.filterWidth *\n this.convInfo.inChannels;\n\n return [\n tilesFitEvenlyIntoShape(tileSizeA, [dimAOuter, dimInner]),\n tilesFitEvenlyIntoShape(tileSizeB, [dimInner, dimBOuter])\n ];\n }\n\n getUserCode(): string {\n const coordASnippet = this.isChannelsLast ? `\n let coord = vec4(batch, xRow, xCol, col % inChannels);\n ` :\n `\n let coord = vec4(batch, col % inChannels, xRow, xCol);\n `;\n\n const coordResSnippet = this.isChannelsLast ? `\n let outCoord = vec4(\n batch,\n row / outWidth,\n row % outWidth,\n col);\n ` :\n `\n let outCoord = vec4(\n batch,\n col,\n row / outWidth,\n row % outWidth);\n `;\n\n const matMulSource =\n makeMatMulPackedSource(this.elementsPerThread, this.workGroupSize);\n\n const readASnippet = `\n let inChannels = uniforms.wShape[2];\n let outWidth = ${\n this.isChannelsLast ? 
'uniforms.outShape[2]' : 'uniforms.outShape[3]'};\n let outRow = row / outWidth;\n let outCol = row % outWidth;\n\n let WRow = col / (uniforms.filterDims[1] * inChannels);\n let WCol = col / inChannels % uniforms.filterDims[1];\n let xRow = outRow * uniforms.stride[0] + uniforms.dilation[0] * WRow - uniforms.pad[0];\n let xCol = outCol * uniforms.stride[1] + uniforms.dilation[1] * WCol - uniforms.pad[1];\n ${coordASnippet}\n // The bounds checking is always needed since we use it to pad zero for the\n // 'same' padding type.\n if(coordsInBounds4D(coord, uniforms.xShape)) {\n return x[getIndexFromCoords4D(coord, uniforms.xShape)];\n }\n return 0.0;`;\n\n const sampleA = this.fitA ?\n `${readASnippet}` :\n `if (row < uniforms.dimAOuter && col < uniforms.dimInner) {\n ${readASnippet}\n }\n return 0.0;\n `;\n\n const sampleB = this.fitB ?\n `return W[row * uniforms.dimBOuter + col];` :\n `if(coordsInBounds2D(vec2(row, col), vec2(uniforms.dimInner, uniforms.dimBOuter))) {\n return W[row * uniforms.dimBOuter + col];\n\t }\n\t return 0.0;\n\t `;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp = mapActivationToShaderProgram(this.activation, false);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a: f32, outCoord : vec4) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : f32, outCoord : vec4) -> f32 {\n ${activationOp}\n }\n `;\n }\n\n applyActivationSnippet = `value = activation(value, outCoord);`;\n }\n\n const addBiasSnippet =\n this.addBias ? 'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n var batch = i32(globalId.z);\n ${sampleA}\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n ${sampleB}\n }\n\n fn mm_write(row : i32, col : i32, valueInput : f32, globalId : vec3) {\n var batch = i32(globalId.z);\n var value = valueInput;\n let outWidth = ${\n this.isChannelsLast ? 'uniforms.outShape[2]' : 'uniforms.outShape[3]'};\n ${coordResSnippet}\n ${addBiasSnippet}\n ${applyActivationSnippet}\n result[getIndexFromCoords4D(outCoord, uniforms.outShape)] = value;\n }\n ${matMulSource}\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getMainHeaderString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class Conv2DNaiveProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'W'];\n uniforms =\n `filterDims : vec2, pad : vec2, stride : vec2, dilation : vec2,`;\n workGroupSize: [number, number, number] = [128, 1, 1];\n convInfo: backend_util.Conv2DInfo;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivationWeights: boolean;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: backend_util.Activation = null,\n hasPreluActivationWeights = false) {\n this.outputShape = convInfo.outShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n util.assert(\n convInfo.dataFormat === 'channelsLast',\n () => 'TODO: NCHW is unimplemented');\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n this.convInfo = convInfo;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivationWeights = hasPreluActivationWeights;\n\n this.shaderKey = `conv2DNaive_${this.activation}`;\n }\n\n getUserCode(): string {\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp = mapActivationToShaderProgram(this.activation);\n if (this.hasPreluActivationWeights) {\n activationSnippet =\n `fn activation(a : f32, outCoord : vec4) -> f32{\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : f32, outCoord : vec4) -> f32{\n ${activationOp}\n }\n `;\n }\n\n applyActivationSnippet = `value = activation(value, outCoord);`;\n }\n\n const addBiasSnippet =\n this.addBias ? 
'value = value + getBiasByOutputCoords(outCoord);' : '';\n\n const userCode = `\n ${activationSnippet}\n fn readInp(batch : i32, row : i32, col : i32, chan : i32) -> f32 {\n let coord = vec4(batch, row, col, chan);\n if(coordsInBounds4D(coord, uniforms.xShape)) {\n return getX(batch, row, col, chan);\n }\n return 0.0;\n }\n\n fn readFilt(row : i32, col : i32, xChannel : i32, outChannel : i32) -> f32{\n let coord = vec4(row, col, xChannel, outChannel);\n if(coordsInBounds4D(coord, uniforms.wShape)) {\n return getW(row, col, xChannel, outChannel);\n }\n return 0.0;\n }\n\n fn writeResult(batch : i32, row : i32, col : i32, chan : i32, value : f32) {\n let coord = vec4(batch, row, col, chan);\n if (coordsInBounds4D(coord, uniforms.outShape)) {\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(batch, row, col, chan, value);\n }\n }\n\n ${getMainHeaderString()}\n let coords = getOutputCoords();\n let batch = coords[0];\n let outChannel = coords[3];\n\n var acc = 0.0;\n\n for (var row = 0; row < uniforms.filterDims[0]; row = row + 1) {\n for (var col = 0; col < uniforms.filterDims[1]; col = col + 1) {\n for (var xChannel = 0; xChannel < uniforms.xShape[3]; xChannel = xChannel + 1) {\n let coordRow = coords[1] * uniforms.stride[0] + uniforms.dilation[0] * row - uniforms.pad[0];\n let coordCol = coords[2] * uniforms.stride[1] + uniforms.dilation[1] * col - uniforms.pad[1];\n let v = readInp(batch, coordRow, coordCol, xChannel);\n let f = readFilt(row, col, xChannel, outChannel);\n acc = acc + v * f;\n }\n }\n }\n\n writeResult(batch, coords[1], coords[2], outChannel, acc);\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class Im2ColProgram implements WebGPUProgram {\n variableNames = ['A'];\n uniforms =\n `pad : vec2, stride : vec2, dilation : vec2, outWidth : i32, itemsPerBlockRow : i32,\n inChannels : i32,`;\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workPerThread = 4;\n workGroupSize: [number, number, number] = [64, 1, 1];\n isChannelsLast: boolean;\n size = true;\n\n constructor(outputShape: number[], isChannelsLast: boolean) {\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n this.isChannelsLast = isChannelsLast;\n this.shaderKey = `im2col_${this.isChannelsLast}`;\n }\n\n getUserCode(): string {\n const rowDim = this.isChannelsLast ? 0 : 1;\n const colDim = this.isChannelsLast ? 
1 : 2;\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n\n for(var i = 0; i<${this.workPerThread}; i = i + 1) {\n let flatIndex = index * ${this.workPerThread} + i;\n\n let rc = getCoordsFromIndex(flatIndex);\n\n if(flatIndex < uniforms.size) {\n let blockIndex = rc[0];\n let pos = rc[1];\n\n let offsetY = blockIndex / uniforms.outWidth * uniforms.stride[1] - uniforms.pad[1];\n let d0 = offsetY + uniforms.dilation[1] * pos / uniforms.itemsPerBlockRow;\n var value = 0.0;\n if(d0 < uniforms.aShape[${rowDim}] && d0 >= 0) {\n let offsetX = (blockIndex % uniforms.outWidth) * uniforms.stride[0] -\n uniforms.pad[0];\n let d1 = offsetX + uniforms.dilation[0] * ((pos %\n uniforms.itemsPerBlockRow) / uniforms.inChannels);\n let ch = pos % uniforms.inChannels;\n if(d1 < uniforms.aShape[${colDim}] && d1 >= 0) {\n value = getA(d0, d1, ch);\n }\n }\n setOutputAtIndex(flatIndex, value);\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, env, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {Conv2DMMVec4Program} from '../conv2d_mm_vec4_webgpu';\nimport {Conv2DMMProgram} from '../conv2d_mm_webgpu';\nimport {Conv2DNaiveProgram} from '../conv2d_naive_webgpu';\nimport {Im2ColProgram} from '../im2col_webgpu';\nimport {MatMulPackedProgram} from '../matmul_packed_webgpu';\n\nimport {batchMatMulImpl} from './BatchMatMul_impl';\nimport {reshape} from './Reshape';\n\ntype Conv2DConfig = {\n x: TensorInfo,\n filter: TensorInfo,\n convInfo: backend_util.Conv2DInfo,\n backend: WebGPUBackend,\n bias?: TensorInfo,\n preluActivationWeights?: TensorInfo,\n leakyreluAlpha?: number,\n activation?: backend_util.Activation\n};\n\n// For 1x1 kernels that iterate through every point in the input, convolution\n// can be expressed as matrix multiplication (without need for memory\n// remapping).\nfunction conv2dByMatMul({\n x,\n filter,\n convInfo,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: Conv2DConfig) {\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n const transposeA = isChannelsLast ? 
false : true;\n const transposeB = false;\n\n const sameSize = isChannelsLast &&\n convInfo.filterHeight === convInfo.inHeight &&\n convInfo.filterWidth === convInfo.inWidth &&\n convInfo.padInfo.type === 'VALID';\n let xReshaped;\n let filterReshaped;\n\n if (sameSize) {\n const sharedDim =\n convInfo.inHeight * convInfo.inWidth * convInfo.inChannels;\n xReshaped = reshape({\n inputs: {x},\n backend,\n attrs: {shape: [1, convInfo.batchSize, sharedDim]}\n });\n filterReshaped = reshape({\n inputs: {x: filter},\n backend,\n attrs: {shape: [1, sharedDim, convInfo.outChannels]}\n });\n } else {\n xReshaped = reshape({\n inputs: {x},\n backend,\n attrs: {\n shape: isChannelsLast ?\n [\n convInfo.batchSize, convInfo.inHeight * convInfo.inWidth,\n convInfo.inChannels\n ] :\n [\n convInfo.batchSize, convInfo.inChannels,\n convInfo.inHeight * convInfo.inWidth\n ]\n }\n });\n filterReshaped = reshape({\n inputs: {x: filter},\n backend,\n attrs: {shape: [1, convInfo.inChannels, convInfo.outChannels]}\n });\n }\n\n const result = batchMatMulImpl({\n a: isChannelsLast ? xReshaped : filterReshaped,\n b: isChannelsLast ? filterReshaped : xReshaped,\n transposeA,\n transposeB,\n backend,\n bias,\n activation,\n preluActivationWeights,\n leakyreluAlpha\n });\n const out = reshape(\n {inputs: {x: result}, backend, attrs: {shape: convInfo.outShape}});\n\n backend.disposeData(xReshaped.dataId);\n backend.disposeData(filterReshaped.dataId);\n backend.disposeData(result.dataId);\n\n return out;\n}\n\n// Implements the im2row algorithm as outlined in \"High Performance\n// Convolutional Neural Networks for Document Processing\" (Suvisoft, 2006)\nfunction conv2dWithIm2Col({\n x,\n filter,\n convInfo,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: Conv2DConfig) {\n // Rearranges conv2d input so each block to be convolved over forms the\n // column of a new matrix with shape [filterWidth * filterHeight *\n // inChannels, outHeight * outWidth]. The filter is also rearranged so each\n // output channel forms a row of a new matrix with shape [outChannels,\n // filterWidth * filterHeight * inChannels]. 
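conv2dByMatMul above relies on the fact that a 1x1, stride-1 convolution (or a filter that covers the whole VALID-padded input) touches every input pixel exactly once, so reshaping turns it into a single matrix multiply. A minimal plain-TypeScript reference of that equivalence, assuming NHWC data and hypothetical names; this sketch is illustrative only and not part of the imported bundle:

function conv1x1AsMatMul(
    x: number[],       // NHWC input, flattened, logical shape [n, h, w, cIn]
    filter: number[],  // filter, flattened, logical shape [1, 1, cIn, cOut]
    n: number, h: number, w: number, cIn: number, cOut: number): number[] {
  // Reshape x to [n * h * w, cIn] and filter to [cIn, cOut]; then
  // out[p, oc] = sum over ic of x[p, ic] * filter[ic, oc].
  const out = new Array<number>(n * h * w * cOut).fill(0);
  for (let p = 0; p < n * h * w; p++) {
    for (let oc = 0; oc < cOut; oc++) {
      let acc = 0;
      for (let ic = 0; ic < cIn; ic++) {
        acc += x[p * cIn + ic] * filter[ic * cOut + oc];
      }
      out[p * cOut + oc] = acc;  // identical layout to the NHWC conv output
    }
  }
  return out;
}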
The convolution is then\n // computed by multiplying these matrices and reshaping the result.\n const {\n filterWidth,\n filterHeight,\n inChannels,\n strideWidth,\n strideHeight,\n padInfo,\n outWidth,\n outHeight,\n dilationWidth,\n dilationHeight,\n dataFormat\n } = convInfo;\n\n const isChannelsLast = dataFormat === 'channelsLast';\n\n const sharedDim = filterWidth * filterHeight * inChannels;\n const numCols = outHeight * outWidth;\n const x2ColShape = [numCols, sharedDim];\n const transposeA = false;\n const transposeB = false;\n\n const intermediates: TensorInfo[] = [];\n\n const xSqueezed =\n reshape({inputs: {x}, backend, attrs: {shape: x.shape.slice(1)}});\n const w2Row = reshape(\n {inputs: {x: filter}, backend, attrs: {shape: [1, sharedDim, -1]}});\n\n intermediates.push(xSqueezed);\n intermediates.push(w2Row);\n\n const im2ColProgram = new Im2ColProgram(x2ColShape, isChannelsLast);\n const dimensions = [\n {type: 'int32', data: [padInfo.left, padInfo.top]}, // Padding.\n {type: 'int32', data: [strideWidth, strideHeight]}, // Stride.\n {type: 'int32', data: [dilationWidth, dilationHeight]}, // Dilation.\n {type: 'int32', data: [outWidth]},\n {type: 'int32', data: [inChannels * filterWidth]}, // itemsPerBlockRow.\n {type: 'int32', data: [inChannels]}\n ];\n const im2Col = backend.runWebGPUProgram(\n im2ColProgram, [xSqueezed], xSqueezed.dtype, dimensions);\n const im2Col3D = reshape({\n inputs: {x: im2Col},\n backend,\n attrs: {shape: [1, x2ColShape[0], x2ColShape[1]]}\n });\n intermediates.push(im2Col);\n intermediates.push(im2Col3D);\n const a3dShape: [number, number, number] = [1, x2ColShape[0], x2ColShape[1]];\n const matMulProgram = new MatMulPackedProgram(\n a3dShape, [1, numCols, convInfo.outChannels],\n env().get('WEBGPU_MATMUL_WORK_PER_THREAD') as number, true, true,\n transposeA, transposeB, bias, activation, preluActivationWeights);\n const dimAOuter = a3dShape[1];\n const dimInner = a3dShape[2];\n const dimBOuter = convInfo.outChannels;\n const matmulDimensions = [\n {type: 'int32', data: [dimAOuter]}, {type: 'int32', data: [dimBOuter]},\n {type: 'int32', data: [dimInner]}\n ];\n const inputs: TensorInfo[] = [im2Col3D, w2Row];\n if (bias) {\n inputs.push(bias);\n }\n if (preluActivationWeights) {\n inputs.push(preluActivationWeights);\n }\n if (activation === 'leakyrelu') {\n dimensions.push({type: 'float32', data: [leakyreluAlpha]});\n matMulProgram.uniforms += ' alpha : f32,';\n }\n const result: TensorInfo = backend.runWebGPUProgram(\n matMulProgram, inputs, im2Col3D.dtype, matmulDimensions);\n\n const outShape = isChannelsLast ?\n [1, outHeight, outWidth, convInfo.outChannels] :\n [1, convInfo.outChannels, outHeight, outWidth];\n const out = reshape({inputs: {x: result}, backend, attrs: {shape: outShape}});\n\n intermediates.push(result);\n for (const i of intermediates) {\n backend.disposeData(i.dataId);\n }\n\n return out;\n}\n\nexport function conv2DImpl({\n x,\n filter,\n convInfo,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: Conv2DConfig) {\n const hasBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n let program: Conv2DMMProgram|Conv2DNaiveProgram|Conv2DMMVec4Program;\n const sameSize = isChannelsLast &&\n convInfo.filterHeight === convInfo.inHeight &&\n convInfo.filterWidth === convInfo.inWidth &&\n convInfo.padInfo.type === 'VALID';\n if (sameSize ||\n (convInfo.filterHeight === 1 && 
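conv2dWithIm2Col above follows the im2row layout: each output position contributes one row of length filterHeight * filterWidth * inChannels, and the reshaped filter supplies the other matmul operand. A CPU sketch of that rearrangement for a single channels-last image (dilation omitted for brevity; names are hypothetical and the sketch is not part of the imported bundle):

function im2Row(
    x: number[],  // input image, flattened, logical shape [h, w, cIn]
    h: number, w: number, cIn: number,
    filterH: number, filterW: number,
    strideH: number, strideW: number,
    padTop: number, padLeft: number,
    outH: number, outW: number): number[][] {
  const rows: number[][] = [];
  for (let oy = 0; oy < outH; oy++) {
    for (let ox = 0; ox < outW; ox++) {
      const row: number[] = [];
      for (let fy = 0; fy < filterH; fy++) {
        for (let fx = 0; fx < filterW; fx++) {
          for (let c = 0; c < cIn; c++) {
            const iy = oy * strideH + fy - padTop;
            const ix = ox * strideW + fx - padLeft;
            const inBounds = iy >= 0 && iy < h && ix >= 0 && ix < w;
            row.push(inBounds ? x[(iy * w + ix) * cIn + c] : 0);
          }
        }
      }
      // rows ends up with shape [outH * outW, filterH * filterW * cIn].
      rows.push(row);
    }
  }
  return rows;
}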
convInfo.filterWidth === 1 &&\n convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 &&\n convInfo.strideHeight === 1 && convInfo.strideWidth === 1 &&\n (convInfo.padInfo.type === 'SAME' ||\n convInfo.padInfo.type === 'VALID'))) {\n return conv2dByMatMul({\n x,\n filter,\n convInfo,\n backend,\n bias,\n activation,\n preluActivationWeights,\n leakyreluAlpha\n });\n }\n\n if (env().getBool('WEBGPU_CONV_SEPARATE_IM2COL_SHADER') && x.shape[0] === 1) {\n util.assert(isChannelsLast, () => 'TODO: NCHW is unimplemented');\n return conv2dWithIm2Col({\n x,\n filter,\n convInfo,\n backend,\n bias,\n preluActivationWeights,\n leakyreluAlpha,\n activation\n });\n }\n const useNaive = env().getBool('WEBGPU_USE_NAIVE_CONV2D');\n const useVec4 =\n (convInfo.inChannels % 4 === 0 ||\n (convInfo.inChannels === 3 && convInfo.padInfo.type === 'VALID')) &&\n convInfo.outChannels % 4 === 0 && isChannelsLast;\n\n const padInfo = [convInfo.padInfo.top, convInfo.padInfo.left];\n const dimensions = [\n {type: 'int32', data: [convInfo.filterHeight, convInfo.filterWidth]},\n {type: 'int32', data: [...padInfo]},\n {type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]},\n {type: 'int32', data: [convInfo.dilationHeight, convInfo.dilationWidth]}\n ];\n if (useNaive) {\n util.assert(isChannelsLast, () => 'TODO: NCHW is unimplemented');\n // TODO(kainino0x): This may be obsolete, but is kept for reference.\n program = new Conv2DNaiveProgram(\n convInfo, hasBias, activation, hasPreluActivationWeights);\n } else {\n if (useVec4) {\n program = new Conv2DMMVec4Program(\n convInfo, hasBias, activation, hasPreluActivationWeights);\n } else {\n program = new Conv2DMMProgram(\n convInfo, hasBias, activation, hasPreluActivationWeights);\n }\n\n const dimAOuter = convInfo.outHeight * convInfo.outWidth;\n const dimBOuter = convInfo.outChannels;\n const dimInner =\n convInfo.filterHeight * convInfo.filterWidth * convInfo.inChannels;\n dimensions.push(\n {type: 'int32', data: [dimAOuter]}, {type: 'int32', data: [dimBOuter]},\n {type: 'int32', data: [dimInner]});\n }\n\n const inputVar: TensorInfo[] = [x, filter];\n if (hasBias) {\n inputVar.push(bias);\n }\n if (hasPreluActivationWeights) {\n inputVar.push(preluActivationWeights);\n }\n if (activation === 'leakyrelu') {\n dimensions.push({type: 'float32', data: [leakyreluAlpha]});\n program.uniforms += ' alpha : f32,';\n }\n return backend.runWebGPUProgram(program, inputVar, x.dtype, dimensions);\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
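The dispatch logic in conv2DImpl can be summarized as a predicate chain. The sketch below restates it loosely over a hypothetical ConvShape record; it is illustrative only and not part of the imported bundle (the real code asserts channels-last on the im2col and naive paths instead of falling through):

interface ConvShape {
  batchSize: number;
  inHeight: number; inWidth: number; inChannels: number;
  outChannels: number;
  filterHeight: number; filterWidth: number;
  strideHeight: number; strideWidth: number;
  dilationHeight: number; dilationWidth: number;
  padType: 'SAME'|'VALID'|'NUMBER'|'EXPLICIT';
  channelsLast: boolean;
}

function pickConv2DPath(
    c: ConvShape, separateIm2Col: boolean, useNaive: boolean):
    'matmul'|'im2col'|'naive'|'mm_vec4'|'mm' {
  // Filter covers the whole VALID-padded input: one big matmul.
  const sameSize = c.channelsLast && c.filterHeight === c.inHeight &&
      c.filterWidth === c.inWidth && c.padType === 'VALID';
  // 1x1, stride-1, dilation-1 filter: also a plain matmul.
  const oneByOne = c.filterHeight === 1 && c.filterWidth === 1 &&
      c.dilationHeight === 1 && c.dilationWidth === 1 &&
      c.strideHeight === 1 && c.strideWidth === 1 &&
      (c.padType === 'SAME' || c.padType === 'VALID');
  if (sameSize || oneByOne) return 'matmul';
  if (separateIm2Col && c.batchSize === 1 && c.channelsLast) return 'im2col';
  if (useNaive) return 'naive';
  // Otherwise use the matmul-based conv program, vectorized when channels
  // line up with vec4 loads.
  const vec4 = (c.inChannels % 4 === 0 ||
      (c.inChannels === 3 && c.padType === 'VALID')) &&
      c.outChannels % 4 === 0 && c.channelsLast;
  return vec4 ? 'mm_vec4' : 'mm';
}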
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2D, Conv2DAttrs, Conv2DInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {conv2DImpl} from './Conv2D_impl';\n\nexport function conv2d(\n args: {inputs: Conv2DInputs, attrs: Conv2DAttrs, backend: WebGPUBackend}) {\n const {inputs, attrs, backend} = args;\n const {x, filter} = inputs;\n const {strides, pad, dataFormat, dilations, dimRoundingMode} = attrs;\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, dilations, pad,\n dimRoundingMode, false /* depthwise */, $dataFormat);\n return conv2DImpl({x, filter, convInfo, backend});\n}\n\nexport const conv2DConfig: KernelConfig = {\n kernelName: Conv2D,\n backendName: 'webgpu',\n kernelFunc: conv2d as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {makeMatMulPackedSource} from './matmul_packed_webgpu';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, computeWorkGroupSizeForConv2d, computeWorkPerThreadForConv2d} from './webgpu_util';\n\nexport class Conv2DDerInputMMProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'W'];\n uniforms =\n 'filterDims : vec2, pads : vec2, stride : vec2, outBackprop : vec4, dimAOuter : i32, dimBOuter : i32, dimInner : i32,';\n workGroupSize: [number, number, number];\n elementsPerThread: [number, number, number];\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.inShape;\n\n util.assert(\n convInfo.dataFormat === 'channelsLast',\n () => 'TODO: NCHW is unimplemented');\n this.dispatchLayout = {x: [3], y: [1, 2], z: [0]};\n this.workGroupSize =\n computeWorkGroupSizeForConv2d(this.dispatchLayout, this.outputShape);\n this.elementsPerThread =\n computeWorkPerThreadForConv2d(this.dispatchLayout, this.outputShape);\n\n 
this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n this.elementsPerThread);\n\n this.shaderKey = `conv2DDerInputMM_${this.elementsPerThread}`;\n }\n\n getUserCode(): string {\n const matMulSource =\n makeMatMulPackedSource(this.elementsPerThread, this.workGroupSize);\n\n const readASnippet = `\n let outRow = row / uniforms.outShape[2];\n let outCol = row % uniforms.outShape[2];\n\n let WRow = col / (uniforms.filterDims[1] * uniforms.outBackprop[3]);\n let WCol = col / uniforms.outBackprop[3] % uniforms.filterDims[1];\n let xR = f32(outRow - uniforms.pads[0] + WRow) / f32(uniforms.stride[0]);\n let xC = f32(outCol - uniforms.pads[1] + WCol) / f32(uniforms.stride[1]);\n if (xR < 0.0 || xR >= f32(uniforms.outBackprop[1]) || fract(xR) > 0.0) {\n return 0.0;\n }\n if (xC < 0.0 || xC >= f32(uniforms.outBackprop[2]) || fract(xC) > 0.0) {\n return 0.0;\n }\n let coord = vec4(\n batch,\n i32(xR),\n i32(xC),\n col % uniforms.outBackprop[3]);\n return x[getIndexFromCoords4D(coord, uniforms.xShape)];`;\n\n const sampleA = `if (row < uniforms.dimAOuter && col < uniforms.dimInner) {\n ${readASnippet}\n }\n return 0.0;`;\n\n const userCode = `\n fn mm_readA(row : i32, col : i32, globalId : vec3) -> f32 {\n var batch = i32(globalId.z);\n ${sampleA}\n }\n\n fn mm_readB(row : i32, col : i32, globalId : vec3) -> f32 {\n let coordX = uniforms.filterDims.x - 1 -\n row / (uniforms.filterDims[1] * uniforms.outBackprop[3]);\n let coordY = uniforms.filterDims.y - 1 -\n (row / uniforms.outBackprop[3]) % uniforms.filterDims[1];\n if (row < uniforms.dimInner && col < uniforms.dimBOuter &&\n coordX >= 0 && coordY >= 0) {\n let coord = vec4(coordX, coordY, col,\n row % uniforms.outBackprop[3]);\n return W[getIndexFromCoords4D(coord, uniforms.wShape)];\n }\n return 0.0;\n }\n\n fn mm_write(row : i32, col : i32, valueInput : f32, globalId : vec3) {\n var batch = i32(globalId.z);\n var value = valueInput;\n let outCoord = vec4(\n batch,\n row / uniforms.outShape[2],\n row % uniforms.outShape[2],\n col);\n result[getIndexFromCoords4D(outCoord, uniforms.outShape)] = value;\n }\n\n ${matMulSource}\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
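The mm_readA snippet above maps a dx position plus a flattened filter/channel column index back onto the dy tensor, rejecting strided positions that do not land on an integer dy index. A plain-TypeScript restatement of that mapping, with hypothetical names; it is a sketch, not part of the imported bundle:

function mapDxToDy(
    outRow: number, outCol: number, col: number,
    filterW: number, dyChannels: number,
    padTop: number, padLeft: number,
    strideH: number, strideW: number,
    dyH: number, dyW: number): {dyR: number, dyC: number, d2: number}|null {
  // Decompose the flattened column index into a filter row/col.
  const wRow = Math.floor(col / (filterW * dyChannels));
  const wCol = Math.floor(col / dyChannels) % filterW;
  // Invert the forward stride; fractional results mean "no contribution".
  const xR = (outRow - padTop + wRow) / strideH;
  const xC = (outCol - padLeft + wCol) / strideW;
  if (xR < 0 || xR >= dyH || !Number.isInteger(xR)) return null;
  if (xC < 0 || xC >= dyW || !Number.isInteger(xC)) return null;
  return {dyR: xR, dyC: xC, d2: col % dyChannels};
}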
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class Conv2DDerInputProgram implements WebGPUProgram {\n variableNames = ['dy', 'W'];\n uniforms =\n 'filterDims : vec2, pads : vec2, stride : vec2, outBackprop : vec4,';\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n isChannelsLast: boolean;\n size = true;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.inShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.isChannelsLast = convInfo.dataFormat === 'channelsLast';\n this.shaderKey = `conv2DDerInput_${this.isChannelsLast}`;\n }\n\n getUserCode(): string {\n const rowDim = this.isChannelsLast ? 1 : 2;\n const colDim = this.isChannelsLast ? 2 : 3;\n const channelDim = this.isChannelsLast ? 3 : 1;\n return `\n ${getMainHeaderAndGlobalIndexString()} {\n if(index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let batch = coords[0];\n let d1 = coords[${channelDim}];\n\n let dyCorner = vec2(coords[${rowDim}]), coords[${\n colDim}]) - uniforms.pads;\n let dyRCorner = dyCorner.x;\n let dyCCorner = dyCorner.y;\n\n // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1).\n // ? = to be determined. : = across all values in that axis.\n var dotProd = 0.0;\n for (var wR = 0; wR < uniforms.filterDims.x; wR = wR + 1) {\n let dyR = (f32(dyRCorner) + f32(wR)) / f32(uniforms.stride.x);\n let wRPerm = uniforms.filterDims.x - 1 - wR;\n if (dyR < 0.0 || dyR >= f32(uniforms.outBackprop[1]) || fract(dyR) > 0.0 ||\n wRPerm < 0) {\n continue;\n }\n let idyR = dyR;\n\n for (var wC = 0; wC < uniforms.filterDims.y; wC = wC + 1) {\n let dyC = (f32(dyCCorner) + f32(wC)) / f32(uniforms.stride.y);\n let wCPerm = uniforms.filterDims.y - 1 - wC;\n if (dyC < 0.0 || dyC >= f32(uniforms.outBackprop[2]) ||\n fract(dyC) > 0.0 || wCPerm < 0) {\n continue;\n }\n let idyC = dyC;\n\n for (var d2 = 0; d2 < uniforms.outBackprop[3]; d2 = d2 + 1) {\n if (${this.isChannelsLast}) {\n let xValue = getDy(batch, idyR, idyC, d2);\n let wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd = dotProd + xValue * wValue;\n } else {\n let xValue = getDy(batch, d2, idyR, idyC);\n let wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd = dotProd + xValue * wValue;\n }\n\n }\n }\n }\n setOutputAtIndex(index, dotProd);\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2DBackpropInput, Conv2DBackpropInputAttrs, Conv2DBackpropInputInputs, env, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {Conv2DDerInputMMProgram} from '../conv_backprop_mm_webgpu';\nimport {Conv2DDerInputProgram} from '../conv_backprop_webgpu';\n\nexport function conv2DBackpropInput(args: {\n inputs: Conv2DBackpropInputInputs,\n attrs: Conv2DBackpropInputAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {inputShape, strides, pad, dataFormat, dimRoundingMode} = attrs;\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n inputShape, filter.shape as [number, number, number, number], strides,\n 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat);\n\n const dimensions = [\n {type: 'int32', data: [convInfo.filterHeight, convInfo.filterWidth]},\n {\n type: 'int32',\n data: [\n convInfo.filterHeight - 1 - convInfo.padInfo.top,\n convInfo.filterWidth - 1 - convInfo.padInfo.left\n ]\n },\n {type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]},\n {\n type: 'int32',\n data: [\n convInfo.batchSize, convInfo.outHeight, convInfo.outWidth,\n convInfo.outChannels\n ]\n },\n ];\n let program: Conv2DDerInputProgram|Conv2DDerInputMMProgram;\n if (env().getBool('WEBGPU_USE_NAIVE_CONV2D_TRANSPOSE')) {\n // Keep Conv2DDerInputProgram for reference.\n program = new Conv2DDerInputProgram(convInfo);\n } else {\n program = new Conv2DDerInputMMProgram(convInfo);\n const dimAOuter = convInfo.inShape[1] * convInfo.inShape[2];\n const dimBOuter = convInfo.inShape[3];\n const dimInner =\n convInfo.filterHeight * convInfo.filterWidth * convInfo.outChannels;\n dimensions.push(\n {type: 'uint32', data: [dimAOuter]},\n {type: 'uint32', data: [dimBOuter]},\n {type: 'uint32', data: [dimInner]});\n }\n return backend.runWebGPUProgram(program, [dy, filter], 'float32', dimensions);\n}\n\nexport const conv2DBackpropInputConfig: KernelConfig = {\n kernelName: Conv2DBackpropInput,\n backendName: 'webgpu',\n kernelFunc: conv2DBackpropInput as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cos, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const cos = unaryKernelFunc({opType: UnaryOpType.COS});\n\nexport const cosConfig: KernelConfig = {\n kernelName: Cos,\n backendName: 'webgpu',\n kernelFunc: cos\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cosh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const cosh = unaryKernelFunc({opType: UnaryOpType.COSH});\n\nexport const coshConfig: KernelConfig = {\n kernelName: Cosh,\n backendName: 'webgpu',\n kernelFunc: cosh\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class CropAndResizeProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['Image', 'Boxes', 'BoxInd'];\n uniforms = 'extrapolationValue : f32,';\n workGroupSize: [number, number, number] = [64, 1, 1];\n methodId: number;\n cropHeightBiggerThan1: boolean;\n cropWidthBiggerThan1: boolean;\n size = true;\n\n constructor(\n channnel: number, boxShape: [number, number], cropSize: [number, number],\n method: 'bilinear'|'nearest') {\n const [numBoxes, ] = boxShape;\n this.outputShape = [numBoxes, cropSize[0], cropSize[1], channnel];\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.methodId = method === 'bilinear' ? 1 : 0;\n this.cropHeightBiggerThan1 = this.outputShape[1] > 1;\n this.cropWidthBiggerThan1 = this.outputShape[2] > 1;\n this.shaderKey = `cropAndResize_${this.methodId}_${\n this.cropHeightBiggerThan1}_${this.cropWidthBiggerThan1}`;\n }\n\n getUserCode(): string {\n const [inputHeightFloat, inputWidthFloat] =\n [`f32(uniforms.imageShape[1] - 1)`, `f32(uniforms.imageShape[2] - 1)`];\n\n const [heightRatio, heightScale, inY] = this.cropHeightBiggerThan1 ?\n [\n `(${inputHeightFloat} / f32(uniforms.outShape[1] - 1))`,\n '(y2-y1) * height_ratio',\n `y1*${inputHeightFloat} + f32(y)*(height_scale)`,\n ] :\n [\n '0.0',\n '0.0',\n `0.5 * (y1+y2) * ${inputHeightFloat}`,\n ];\n const [widthRatio, widthScale, inX] = this.cropWidthBiggerThan1 ?\n [\n `(${inputWidthFloat} / f32(uniforms.outShape[2] - 1))`,\n '(x2-x1) * width_ratio',\n `x1*${inputWidthFloat} + f32(x)*(width_scale)`,\n ] :\n [\n '0.0',\n '0.0',\n `0.5 * (x1+x2) * ${inputWidthFloat}`,\n ];\n\n // Reference implementation\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let height_ratio = f32(${heightRatio});\n let width_ratio = f32(${widthRatio});\n let b = coords[0];\n let y = coords[1];\n let x = coords[2];\n let d = coords[3];\n // get box vals\n let y1 = getBoxes(b, 0);\n let x1 = getBoxes(b, 1);\n let y2 = getBoxes(b, 2);\n let x2 = getBoxes(b, 3);\n // get image in batch index\n let bInd = i32(round(getBoxInd(b)));\n if(bInd < 0 || bInd >= uniforms.outShape[0]) {\n return;\n }\n let height_scale = ${heightScale};\n let width_scale = ${widthScale};\n let in_y = ${inY};\n if( in_y < 0.0 || in_y > 
${inputHeightFloat} ) {\n setOutputAtIndex(index, uniforms.extrapolationValue);\n return;\n }\n let in_x = ${inX};\n if( in_x < 0.0 || in_x > ${inputWidthFloat} ) {\n setOutputAtIndex(index, uniforms.extrapolationValue);\n return;\n }\n let sourceFracIndexCR = vec2(in_x,in_y);\n if(${this.methodId} == 1) {\n // Compute the four integer indices.\n let sourceFloorCR = vec2(sourceFracIndexCR);\n let sourceCeilCR = vec2(ceil(sourceFracIndexCR));\n let topLeft = getImage(bInd, sourceFloorCR.y, sourceFloorCR.x, d);\n let bottomLeft = getImage(bInd, sourceCeilCR.y, sourceFloorCR.x, d);\n let topRight = getImage(bInd, sourceFloorCR.y, sourceCeilCR.x, d);\n let bottomRight = getImage(bInd, sourceCeilCR.y, sourceCeilCR.x, d);\n let fracCR = sourceFracIndexCR - vec2(sourceFloorCR);\n let top = topLeft + (topRight - topLeft) * fracCR.x;\n let bottom = bottomLeft + (bottomRight - bottomLeft) * fracCR.x;\n let newValue = top + (bottom - top) * fracCR.y;\n setOutputAtIndex(index, newValue);\n } else {\n // Compute the coordinators of nearest neighbor point.\n let sourceNearestCR = vec2(floor(\n sourceFracIndexCR + vec2(0.5,0.5)));\n let newValue = getImage(\n bInd, sourceNearestCR.y, sourceNearestCR.x, d);\n setOutputAtIndex(index, newValue);\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {CropAndResize, CropAndResizeAttrs, CropAndResizeInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {CropAndResizeProgram} from '../crop_and_resize_webgpu';\n\nexport const cropAndResize = (args: {\n inputs: CropAndResizeInputs,\n backend: WebGPUBackend,\n attrs: CropAndResizeAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {image, boxes, boxInd} = inputs;\n const {cropSize, method, extrapolationValue} = attrs;\n\n const program = new CropAndResizeProgram(\n image.shape[3], boxes.shape as [number, number], cropSize, method);\n const uniformData = [{type: 'float32', data: [extrapolationValue]}];\n return backend.runWebGPUProgram(\n program, [image, boxes, boxInd], 'float32', uniformData);\n};\n\nexport const cropAndResizeConfig: KernelConfig = {\n kernelName: CropAndResize,\n backendName: 'webgpu',\n kernelFunc: cropAndResize as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
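The bilinear branch above blends the four pixels surrounding the fractional sample point. A single-channel CPU sketch of just that blend (the box-to-image coordinate mapping is omitted; names are hypothetical and the sketch is not part of the imported bundle):

function bilinearSample(
    image: number[],          // flattened, logical shape [height, width]
    height: number, width: number,
    inY: number, inX: number  // fractional sample coordinates, in bounds
    ): number {
  const y0 = Math.floor(inY), x0 = Math.floor(inX);
  const y1 = Math.ceil(inY), x1 = Math.ceil(inX);
  const at = (y: number, x: number) => image[y * width + x];
  const fracY = inY - y0, fracX = inX - x0;
  // Interpolate along x on the top and bottom rows, then along y.
  const top = at(y0, x0) + (at(y0, x1) - at(y0, x0)) * fracX;
  const bottom = at(y1, x0) + (at(y1, x1) - at(y1, x0)) * fracX;
  return top + (bottom - top) * fracY;
}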
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport enum CumOpType {\n Prod = '*',\n Sum = '+',\n}\n\nexport class CumProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n workGroupSize: [number, number, number];\n // pow(i32, i32) is not supported, use pow(f32, f32) instead.\n uniforms = 'index : f32,';\n size = true;\n exclusive: boolean;\n reverse: boolean;\n op: CumOpType;\n\n constructor(\n op: CumOpType, shape: number[], exclusive: boolean, reverse: boolean) {\n const workGroupSizeX = 128;\n this.workGroupSize = [workGroupSizeX, 1, 1];\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.exclusive = exclusive;\n this.reverse = reverse;\n this.op = op;\n this.shaderKey = `cum_${this.op}_${this.exclusive}_${this.reverse}`;\n }\n\n getUserCode(): string {\n const rank = this.outputShape.length;\n const initVal = this.op === CumOpType.Prod ? '1.0' : '0.0';\n const val = this.exclusive ? initVal :\n `getX(${getCoords(rank, 'coords', this.op)})`;\n const length = this.outputShape[this.outputShape.length - 1];\n let condition = '';\n let idxString = '';\n // When exclusive is set, the cum op becomes roll op that copies the\n // value from the previous index based on the direction specified by the\n // reverse flag.\n if (this.exclusive) {\n condition = this.reverse ? `end != ${length - 1}` : 'end != 0';\n idxString = this.reverse ? 'end + 1' : 'end - 1';\n } else {\n condition = this.reverse ? `end + pow2 < ${length}` : 'end >= pow2';\n idxString = (this.reverse ? 
'end + pow2' : 'end - pow2');\n }\n return `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n var coords = getCoordsFromIndex(index);\n\n let end = ${getFinalCoord(rank, 'coords', this.op)};\n var val = ${val};\n let pow2 = i32(pow(2.0, uniforms.index));\n if (${condition}) {\n let idx = ${idxString};\n ${getFinalCoord(rank, 'coords', this.op)} = idx;\n val ${this.op}= getX(${getCoords(rank, 'coords', this.op)});\n }\n setOutputAtIndex(index, val);\n }\n }\n `;\n }\n}\n\nfunction getCoords(rank: number, name: string, op: CumOpType): string {\n if (rank === 1) {\n return `${name}`;\n } else if (rank === 2) {\n return `${name}.x, ${name}.y`;\n } else if (rank === 3) {\n return `${name}.x, ${name}.y, ${name}.z`;\n } else if (rank === 4) {\n return `${name}.x, ${name}.y, ${name}.z, ${name}.w`;\n } else {\n throw Error(`Cumulative ${op} for rank ${rank} is not yet supported`);\n }\n}\n\nfunction getFinalCoord(rank: number, name: string, op: CumOpType): string {\n if (rank === 1) {\n return `${name}`;\n } else if (rank === 2) {\n return `${name}.y`;\n } else if (rank === 3) {\n return `${name}.z`;\n } else if (rank === 4) {\n return `${name}.w`;\n } else {\n throw Error(`Cumulative ${op} for rank ${rank} is not yet supported`);\n }\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {CumOpType, CumProgram} from '../cum_webgpu';\n\nimport {identity} from './Identity';\nimport {transpose} from './Transpose';\n\nexport function cumImpl(\n op: CumOpType, x: TensorInfo, backend: WebGPUBackend, axis: number,\n exclusive: boolean, reverse: boolean): TensorInfo {\n const xRank = x.shape.length;\n const permutation = backend_util.getAxesPermutation([axis], xRank);\n let permutedX = x;\n if (permutation != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n }\n const permutedAxis = backend_util.getInnerMostAxes(1, xRank)[0];\n\n if (permutedAxis !== xRank - 1) {\n throw new Error(\n `WebGPU cumprod shader expects an inner-most axis=${\n x.shape.length - 1} ` +\n `but got axis=${axis}`);\n }\n const size = permutedX.shape[permutedAxis];\n let result = identity({inputs: {x: permutedX}, backend});\n // Use cum parallel algorithm, inspired by:\n // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda\n // Note: although the algorithm is called sum, it works for any associtative\n // operator with an identity.\n\n for (let i = 0; i <= Math.ceil(Math.log2(size)) - 1; i++) {\n const program = new CumProgram(op, permutedX.shape, false, reverse);\n const prevResult = result;\n const uniformData = [{type: 'float32', data: [i]}];\n result =\n backend.runWebGPUProgram(program, [result], result.dtype, 
uniformData);\n backend.disposeData(prevResult.dataId);\n }\n // For exclusive cum, shift the end result in the direction of product or sum\n // and add 1 for product or 0 for sum to the front index.\n if (exclusive) {\n const program = new CumProgram(op, permutedX.shape, exclusive, reverse);\n const prevResult = result;\n const uniformData = [{type: 'float32', data: [0]}];\n result =\n backend.runWebGPUProgram(program, [result], result.dtype, uniformData);\n backend.disposeData(prevResult.dataId);\n }\n\n if (permutation != null) {\n const reversePermutation = backend_util.getUndoAxesPermutation(permutation);\n const reverseTransposedResult = transpose(\n {inputs: {x: result}, backend, attrs: {perm: reversePermutation}});\n\n backend.disposeData(result.dataId);\n backend.disposeData(permutedX.dataId);\n\n return reverseTransposedResult;\n }\n\n return result;\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {CumOpType} from '../cum_webgpu';\nimport {cumImpl} from './Cum_impl';\n\nexport function cumprod(\n args: {inputs: CumprodInputs, backend: WebGPUBackend, attrs: CumprodAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, exclusive, reverse} = attrs;\n return cumImpl(CumOpType.Prod, x, backend, axis, exclusive, reverse);\n}\n\nexport const cumprodConfig: KernelConfig = {\n kernelName: Cumprod,\n backendName: 'webgpu',\n kernelFunc: cumprod as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
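cumImpl above runs a Hillis-Steele style scan (the naive parallel prefix sum discussed in the GPU Gems chapter cited in the comment): ceil(log2(n)) passes, each combining in the element 2^i positions behind, followed by an optional shift for the exclusive form. A CPU reference of the same idea, assuming hypothetical names; it is a sketch, not part of the imported bundle:

function inclusiveScan(
    input: number[], op: (a: number, b: number) => number): number[] {
  let current = input.slice();
  const n = current.length;
  for (let i = 0; i < Math.ceil(Math.log2(n)); i++) {
    const pow2 = 2 ** i;
    // Double-buffer so every element reads the previous pass's values,
    // mirroring the separate GPU dispatch per pass.
    const next = current.slice();
    for (let end = 0; end < n; end++) {
      if (end >= pow2) {
        next[end] = op(current[end], current[end - pow2]);
      }
    }
    current = next;
  }
  return current;
}

// Exclusive variant: shift right by one and seed the identity element
// (0 for sum, 1 for product), matching the final "roll" pass above.
function exclusiveScan(
    input: number[], op: (a: number, b: number) => number,
    identity: number): number[] {
  const inclusive = inclusiveScan(input, op);
  return [identity, ...inclusive.slice(0, -1)];
}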
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cumsum, CumsumAttrs, CumsumInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {CumOpType} from '../cum_webgpu';\nimport {cumImpl} from './Cum_impl';\n\nexport function cumsum(\n args: {inputs: CumsumInputs, backend: WebGPUBackend, attrs: CumsumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, exclusive, reverse} = attrs;\n return cumImpl(CumOpType.Sum, x, backend, axis, exclusive, reverse);\n}\n\nexport const cumsumConfig: KernelConfig = {\n kernelName: Cumsum,\n backendName: 'webgpu',\n kernelFunc: cumsum as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class DepthToSpaceProgram implements WebGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n dataFormat: string;\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n uniforms = 'blockSize : i32,';\n\n constructor(outputShape: number[], dataFormat: 'NHWC'|'NCHW') {\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.shaderKey = `depthToSpace_${dataFormat}`;\n this.dataFormat = dataFormat;\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let b = coords[0];\n let h = ${this.getHeightCoordString()};\n let w = ${this.getWidthCoordString()};\n let d = ${this.getDepthCoordString()};\n\n let in_h = h / uniforms.blockSize;\n let offset_h = h % uniforms.blockSize;\n let in_w = w / uniforms.blockSize;\n let offset_w = w % uniforms.blockSize;\n let offset_d = (offset_h * uniforms.blockSize + offset_w) *\n ${this.getOutputDepthSize()};\n let in_d = d + offset_d;\n\n let rlt = 
${this.getInputSamplingString()};\n setOutputAtIndex(index, rlt);\n }\n }`;\n return userCode;\n }\n\n private getHeightCoordString(): string {\n if (this.dataFormat === 'NHWC') {\n return `coords[1]`;\n } else {\n return `coords[2]`;\n }\n }\n\n private getWidthCoordString(): string {\n if (this.dataFormat === 'NHWC') {\n return `coords[2]`;\n } else {\n return `coords[3]`;\n }\n }\n\n private getDepthCoordString(): string {\n if (this.dataFormat === 'NHWC') {\n return `coords[3]`;\n } else {\n return `coords[1]`;\n }\n }\n\n private getOutputDepthSize(): string {\n if (this.dataFormat === 'NHWC') {\n return `uniforms.outShape[3]`;\n } else {\n return `uniforms.outShape[1]`;\n }\n }\n\n private getInputSamplingString(): string {\n if (this.dataFormat === 'NHWC') {\n return `getX(b, in_h, in_w, in_d)`;\n } else {\n return `getX(b, in_d, in_h, in_w)`;\n }\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DepthToSpace, DepthToSpaceAttrs, DepthToSpaceInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {DepthToSpaceProgram} from '../depth_to_space_webgpu';\n\nexport function depthToSpace(args: {\n inputs: DepthToSpaceInputs,\n backend: WebGPUBackend,\n attrs: DepthToSpaceAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockSize, dataFormat} = attrs;\n\n const batchSize = x.shape[0];\n const inputHeight = (dataFormat === 'NHWC') ? x.shape[1] : x.shape[2];\n const inputWidth = (dataFormat === 'NHWC') ? x.shape[2] : x.shape[3];\n const inputDepth = (dataFormat === 'NHWC') ? x.shape[3] : x.shape[1];\n\n const outputHeight = inputHeight * blockSize;\n const outputWidth = inputWidth * blockSize;\n const outputDepth = inputDepth / (blockSize * blockSize);\n\n const outputShape = (dataFormat === 'NHWC') ?\n [batchSize, outputHeight, outputWidth, outputDepth] :\n [batchSize, outputDepth, outputHeight, outputWidth];\n\n const uniformData = [\n {type: 'int32', data: [blockSize]},\n ];\n\n const program = new DepthToSpaceProgram(outputShape, dataFormat);\n return backend.runWebGPUProgram(program, [x], x.dtype, uniformData);\n}\n\nexport const depthToSpaceConfig: KernelConfig = {\n kernelName: DepthToSpace,\n backendName: 'webgpu',\n kernelFunc: depthToSpace as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
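DepthToSpaceProgram only ever computes an index remapping: every output element copies one input element whose coordinate is derived from the block offsets, and the kernel function above just scales the spatial dimensions by blockSize while dividing depth by blockSize squared. A sketch of the NHWC mapping for a single output coordinate (hypothetical names, not part of the imported bundle):

function depthToSpaceSourceCoord(
    b: number, h: number, w: number, d: number,  // output coordinate (NHWC)
    blockSize: number, outputDepth: number):
    {b: number, inH: number, inW: number, inD: number} {
  const inH = Math.floor(h / blockSize);
  const offsetH = h % blockSize;
  const inW = Math.floor(w / blockSize);
  const offsetW = w % blockSize;
  // The block offsets select which slice of the (deeper) input channel
  // dimension feeds this spatial position.
  const offsetD = (offsetH * blockSize + offsetW) * outputDepth;
  return {b, inH, inW, inD: d + offsetD};
}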
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getWorkGroupSizeString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch} from './webgpu_util';\n\nexport class DepthwiseConv2D3x3Program implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y: number[], z: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'W'];\n uniforms =\n 'pad : vec2, stride : vec2, dilation : vec2, inDims : vec2,';\n workGroupSize: [number, number, number] = [4, 4, 4];\n convInfo: backend_util.Conv2DInfo;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivation: boolean;\n isVec4 = true;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: backend_util.Activation = null, hasPreluActivation = false) {\n this.outputShape = convInfo.outShape;\n this.dispatchLayout = {x: [0, 1], y: [2], z: [3]};\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize, [1, 4, 4]);\n\n util.assert(\n convInfo.dataFormat === 'channelsLast',\n () => 'TODO: NCHW is unimplemented');\n\n if (addBias) {\n this.variableNames.push('bias');\n }\n if (hasPreluActivation) {\n this.variableNames.push('preluActivationWeights');\n }\n\n this.convInfo = convInfo;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivation = hasPreluActivation;\n\n this.shaderKey = `depthwise3x3_${activation}`;\n }\n\n getUserCode(): string {\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp =\n mapActivationToShaderProgram(this.activation, this.isVec4);\n if (this.hasPreluActivation) {\n activationSnippet =\n `fn activation(a : vec4, outCoord : vec4) -> vec4 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : vec4, outCoord : vec4) -> vec4 {\n ${activationOp}\n }\n `;\n }\n\n applyActivationSnippet = `dotProd[i] = activation(dotProd[i], coords);`;\n }\n\n const addBiasSnippet = this.addBias ?\n 'dotProd[i] = dotProd[i] + getBiasByOutputCoords(coords);' :\n '';\n\n const userCode = `\n ${activationSnippet}\n\n ${getWorkGroupSizeString()}\n fn main(@builtin(global_invocation_id) globalId: vec3) {\n let batch = 0;\n let r = i32(globalId.x);\n let c = i32(globalId.y) * 4;\n let d2 = i32(globalId.z) * 4;\n let xRCCorner = vec2(r, c) * uniforms.stride - uniforms.pad;\n let d1 = d2;\n let q = 0;\n\n let xRCorner = xRCCorner.x;\n let xCCorner = xRCCorner.y;\n\n var wVals : array, 9>;\n wVals[0] = getW(0, 0, d1, q);\n wVals[1] = getW(0, 1, d1, q);\n wVals[2] = getW(0, 2, d1, q);\n wVals[3] = getW(1, 0, d1, q);\n wVals[4] = getW(1, 1, d1, q);\n 
wVals[5] = getW(1, 2, d1, q);\n wVals[6] = getW(2, 0, d1, q);\n wVals[7] = getW(2, 1, d1, q);\n wVals[8] = getW(2, 2, d1, q);\n\n var xVals : array, 6>, 3>;\n for (var wR = 0; wR < 3; wR = wR + 1) {\n let xR = xRCorner + wR * uniforms.dilation[0];\n for (var wC = 0; wC < 6; wC = wC + 1) {\n let xC = xCCorner + wC * uniforms.dilation[1];\n if (xR < 0 || xR >= uniforms.inDims[0] || xC < 0 || xC >= uniforms.inDims[1]) {\n xVals[wR][wC] = vec4(0.0);\n } else {\n xVals[wR][wC] = getX(batch, xR, xC, d1);\n }\n }\n }\n\n var dotProd : array, 4>;\n dotProd[0] = vec4(0.0);\n dotProd[1] = vec4(0.0);\n dotProd[2] = vec4(0.0);\n dotProd[3] = vec4(0.0);\n\n for (var wR = 0; wR < 3; wR = wR + 1) {\n for (var wC = 0; wC < 3; wC = wC + 1) {\n let indexW = wR * 3 + wC;\n dotProd[0] = dotProd[0] + xVals[wR][0 + wC] * wVals[indexW];\n dotProd[1] = dotProd[1] + xVals[wR][1 + wC] * wVals[indexW];\n dotProd[2] = dotProd[2] + xVals[wR][2 + wC] * wVals[indexW];\n dotProd[3] = dotProd[3] + xVals[wR][3 + wC] * wVals[indexW];\n }\n }\n\n for (var i = 0; i < 4; i = i + 1) {\n let coords = vec4(batch, r, c + i, d2);\n if (coordsInBounds4D(coords, uniforms.outShape)) {\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutputAtCoords(coords[0], coords[1], coords[2], coords[3], dotProd[i]);\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {mapActivationToShaderProgram} from './activation_util';\nimport {getMainHeaderString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class DepthwiseConv2DProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y?: number[], z?: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'W'];\n uniforms = `pad : vec2, stride : vec2, dilation : vec2,\n inDims : vec2, filterHeight : i32, filterWidth : i32,\n channelMul : i32,`;\n // This is an experimental value.\n workGroupSize: [number, number, number] = [256, 1, 1];\n convInfo: backend_util.Conv2DInfo;\n addBias: boolean;\n activation: backend_util.Activation;\n hasPreluActivation: boolean;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: backend_util.Activation = null, hasPreluActivation = false) {\n this.outputShape = convInfo.outShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n util.assert(\n convInfo.dataFormat === 'channelsLast',\n () => 'TODO: NCHW is unimplemented');\n\n if (addBias) {\n this.variableNames.push('bias');\n }\n if (hasPreluActivation) {\n this.variableNames.push('preluActivationWeights');\n }\n\n 
this.convInfo = convInfo;\n this.addBias = addBias;\n this.activation = activation;\n this.hasPreluActivation = hasPreluActivation;\n this.shaderKey = `depthwise_${this.activation}`;\n }\n\n getUserCode(): string {\n let activationSnippet = '', applyActivationSnippet = '';\n if (this.activation) {\n const activationOp = mapActivationToShaderProgram(this.activation, false);\n if (this.hasPreluActivation) {\n activationSnippet =\n `fn activation(a : f32, outCoord : vec4) -> f32 {\n let b = getPreluActivationWeightsByOutputCoords(outCoord);\n ${activationOp}\n }`;\n } else {\n activationSnippet = `\n fn activation(a : f32, outCoord : vec4) -> f32 {\n ${activationOp}\n }\n `;\n }\n\n applyActivationSnippet = `dotProd = activation(dotProd, coords);`;\n }\n\n const addBiasSnippet = this.addBias ?\n 'dotProd = dotProd + getBiasByOutputCoords(coords);' :\n '';\n\n const userCode = `\n ${activationSnippet}\n\n fn writeResult(batch : i32, row : i32, col : i32, chan : i32,\n value : f32) {\n let coord = vec4(batch, row, col, chan);\n if (coordsInBounds4D(coord, uniforms.outShape)) {\n setOutputAtCoords(batch, row, col, chan, value);\n }\n }\n\n ${getMainHeaderString()}\n let coords = getOutputCoords();\n let batch = coords[0];\n let xRCCorner = vec2(coords.yz) * uniforms.stride - uniforms.pad;\n let d2 = coords[3];\n let d1 = d2 / uniforms.channelMul;\n let q = d2 - d1 * uniforms.channelMul;\n\n let inputRowStart = xRCCorner.x;\n let inputColStart = xRCCorner.y;\n let inputRowEnd = inputRowStart + uniforms.filterHeight *\n uniforms.dilation[0];\n let inputColEnd = inputColStart + uniforms.filterWidth *\n uniforms.dilation[1];\n\n // Convolve x(?, ?, d1) with w(:, :, d1, q) to get y(yR, yC, d2).\n // ? = to be determined. : = across all values in that axis.\n var dotProd = 0.0;\n\n // Extract if checking out of for loop for performance.\n if (inputRowStart >= 0 && inputColStart >= 0 &&\n inputRowEnd < uniforms.inDims[0] &&\n inputColEnd < uniforms.inDims[1]) {\n // Here using a constant value |this.convInfo.filterHeight| instead\n // of uniform value is in order to loop unrolling.\n for (var wR = 0; wR < uniforms.filterHeight; wR = wR + 1) {\n let xR = inputRowStart + wR * uniforms.dilation[0];\n\n for (var wC = 0; wC < uniforms.filterWidth; wC = wC + 1) {\n let xC = inputColStart + wC * uniforms.dilation[1];\n\n let xVal = getX(batch, xR, xC, d1);\n let wVal = getW(wR, wC, d1, q);\n dotProd = dotProd + xVal * wVal;\n }\n }\n } else {\n for (var wR = 0; wR < uniforms.filterHeight; wR = wR + 1) {\n let xR = inputRowStart + wR * uniforms.dilation[0];\n\n if (xR < 0 || xR >= uniforms.inDims[0]) {\n continue;\n }\n\n for (var wC = 0; wC < uniforms.filterWidth; wC = wC + 1) {\n let xC = inputColStart + wC * uniforms.dilation[1];\n\n if (xC < 0 || xC >= uniforms.inDims[1]) {\n continue;\n }\n\n let xVal = getX(batch, xR, xC, d1);\n let wVal = getW(wR, wC, d1, q);\n dotProd = dotProd + xVal * wVal;\n }\n }\n }\n\n ${addBiasSnippet}\n ${applyActivationSnippet}\n writeResult(batch, coords[1], coords[2], d2, dotProd);\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNative, DepthwiseConv2dNativeAttrs, DepthwiseConv2dNativeInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {DepthwiseConv2D3x3Program} from '../depthwise_conv2d_3x3_webgpu';\nimport {DepthwiseConv2DProgram} from '../depthwise_conv2d_webgpu';\n\nexport function depthwiseConv2dNative(args: {\n inputs: DepthwiseConv2dNativeInputs,\n attrs: DepthwiseConv2dNativeAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dilations, dimRoundingMode} = attrs;\n\n let $dilations = dilations;\n if ($dilations == null) {\n $dilations = [1, 1];\n }\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, $dilations,\n pad, dimRoundingMode, true /* depthwise */);\n\n const dimensions = [\n {type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left]},\n {type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]},\n {type: 'int32', data: [convInfo.dilationHeight, convInfo.dilationWidth]},\n {type: 'int32', data: [convInfo.inHeight, convInfo.inWidth]}\n ];\n\n let program: DepthwiseConv2DProgram|DepthwiseConv2D3x3Program;\n // TODO: To see if we need to relax the limitation. Currently, it's only for\n // filter size 3x3.\n if (convInfo.batchSize === 1 && convInfo.inHeight === convInfo.outHeight &&\n convInfo.inWidth === convInfo.outWidth && convInfo.strideHeight === 1 &&\n convInfo.strideWidth === 1 &&\n convInfo.filterHeight === convInfo.filterWidth &&\n convInfo.inChannels === convInfo.outChannels &&\n convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 &&\n convInfo.filterHeight === 3 && convInfo.inChannels % 4 === 0) {\n program = new DepthwiseConv2D3x3Program(convInfo);\n } else {\n program = new DepthwiseConv2DProgram(convInfo);\n dimensions.push(\n {type: 'int32', data: [convInfo.filterHeight]},\n {type: 'int32', data: [convInfo.filterWidth]},\n {type: 'int32', data: [convInfo.outChannels / convInfo.inChannels]});\n }\n\n return backend.runWebGPUProgram(program, [x, filter], x.dtype, dimensions);\n}\n\nexport const depthwiseConv2dNativeConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNative,\n backendName: 'webgpu',\n kernelFunc: depthwiseConv2dNative as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
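The fast path above is gated on a fairly long conjunction; restated as one predicate it reads as follows (hypothetical field names standing in for backend_util.Conv2DInfo; the sketch is not part of the imported bundle):

function canUseDepthwise3x3(c: {
  batchSize: number, inHeight: number, inWidth: number,
  outHeight: number, outWidth: number,
  strideHeight: number, strideWidth: number,
  filterHeight: number, filterWidth: number,
  inChannels: number, outChannels: number,
  dilationHeight: number, dilationWidth: number
}): boolean {
  // Single image, same-size output, unit stride and dilation, square 3x3
  // filter, channel multiplier of 1, and channels divisible by 4 so the
  // vec4 shader can load four channels at a time.
  return c.batchSize === 1 && c.inHeight === c.outHeight &&
      c.inWidth === c.outWidth && c.strideHeight === 1 &&
      c.strideWidth === 1 && c.filterHeight === c.filterWidth &&
      c.inChannels === c.outChannels && c.dilationHeight === 1 &&
      c.dilationWidth === 1 && c.filterHeight === 3 &&
      c.inChannels % 4 === 0;
}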
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Multiply} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {multiplyImplCPU as cpuMultiply} from '../kernel_utils/shared';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const multiplyKernelFunc = binaryKernelFunc({\n opSnippet: BinaryOpType.MUL,\n cpuKernelImpl: cpuMultiply,\n supportsComplex: true\n});\n\nexport const multiplyConfig: KernelConfig = {\n kernelName: Multiply,\n backendName: 'webgpu',\n kernelFunc: multiplyKernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ReduceProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n variableNames = ['x'];\n uniforms = 'reduceSize : i32,';\n reduceType: 'max'|'mean'|'min'|'prod'|'sum';\n inputShape: number[];\n size = true;\n\n constructor(\n reduceInfo: backend_util.ReduceInfo,\n reduceType: 'max'|'mean'|'min'|'prod'|'sum') {\n this.inputShape = [reduceInfo.batchSize, reduceInfo.inSize];\n const [outputShape, ] =\n backend_util.computeOutAndReduceShapes(this.inputShape, [1]);\n this.outputShape = outputShape.length === 0 ? [1] : outputShape;\n\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n // A work group only outputs a data, so we transfer [1, 1, 1] to compute\n // dispatch size.\n this.dispatch =\n computeDispatch(this.dispatchLayout, this.outputShape, [1, 1, 1]);\n\n this.reduceType = reduceType;\n this.shaderKey = `reduce_${reduceType}`;\n }\n\n getUserCode(): string {\n let reduceOp = ``;\n let initValue = '0.0';\n if (this.reduceType === 'min' || this.reduceType === 'max') {\n reduceOp = `\n if (isnan(candidate)) {\n bestValue = uniforms.NAN;\n } else if (!isnan(bestValue) && candidate ${\n this.reduceType === 'min' ? 
'<' : '>'} bestValue)\n { bestValue = candidate; }`;\n initValue = 'f32(x[offset])';\n } else if (this.reduceType === 'sum' || this.reduceType === 'mean') {\n reduceOp = ' bestValue = bestValue + candidate; ';\n } else if (this.reduceType === 'prod') {\n reduceOp = ' bestValue = bestValue * candidate; ';\n initValue = '1.0';\n }\n\n const outputSnippet = this.reduceType === 'mean' ?\n // tslint:disable-next-line:max-line-length\n `setOutputAtIndex(outputIndex, bestValue / f32(uniforms.reduceSize));` :\n `setOutputAtIndex(outputIndex, bestValue);`;\n\n const sharedMemorySnippet = `\n var xBestValues : array;\n `;\n\n const userCode = `\n fn DIV_CEIL(a : u32, b : u32) -> u32 {\n return ((a - 1u) / b + 1u);\n }\n\n ${sharedMemorySnippet}\n fn getOffset(outputIndex : i32) -> i32 {\n let outputCoords = getCoordsFromIndex(outputIndex);\n let offset = ${\n this.outputShape.length === 1 ?\n 'outputCoords' :\n 'outputCoords[0]'} * uniforms.reduceSize;\n return offset;\n }\n ${getMainHeaderAndGlobalIndexString()}\n let outputIndex = index / i32(workGroupSizeX);\n let offset = getOffset(outputIndex);\n var bestValue = ${initValue};\n let Length = uniforms.reduceSize;\n let WorkPerThread = DIV_CEIL(u32(Length), workGroupSizeX);\n for (var k = i32(localId.x); k < Length && outputIndex < uniforms.size;\n k = k + i32(workGroupSizeX)) {\n let candidate = f32(x[offset + k]);\n ${reduceOp}\n }\n xBestValues[localId.x] = bestValue;\n workgroupBarrier();\n\n var reduceSize = min(u32(Length), workGroupSizeX);\n for (var currentSize = reduceSize / 2u; reduceSize > 1u;\n currentSize = reduceSize / 2u) {\n let interval = DIV_CEIL(reduceSize, 2u);\n if (localId.x < currentSize) {\n let candidate = xBestValues[localId.x + interval];\n ${reduceOp}\n xBestValues[localId.x] = bestValue;\n }\n reduceSize = interval;\n workgroupBarrier();\n }\n\n if (localId.x == 0u && outputIndex < uniforms.size) {\n ${outputSnippet}\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, sumOutType, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {maxImplCPU} from './shared';\nimport {prodImplCPU} from './shared';\nimport {ReduceProgram} from '../reduce_webgpu';\nimport {reshape} from '../kernels/Reshape';\nimport {transpose} from '../kernels/Transpose';\n\ntype ReduceTypes = 'max'|'mean'|'min'|'prod'|'sum';\n\nexport function reduce(\n x: TensorInfo, axis: number|number[], keepDims: boolean,\n reduceType: ReduceTypes, backend: WebGPUBackend): TensorInfo {\n const xRank = x.shape.length;\n const toDispose = [];\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n\n let input = x;\n if (permutedAxes != null) {\n input = transpose({inputs: {x}, attrs: {perm: permutedAxes}, backend});\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n toDispose.push(input);\n }\n\n backend_util.assertAxesAreInnerMostDims(reduceType, axes, xRank);\n\n const [reduceOutShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(input.shape, axes);\n let resOutShape = reduceOutShape;\n if (keepDims) {\n // rather than reshape at the end, set the target shape here.\n resOutShape = backend_util.expandShapeToKeepDim(reduceOutShape, origAxes);\n }\n\n let res;\n if ((reduceType === 'max' || reduceType === 'prod') &&\n backend.shouldExecuteOnCPU([input])) {\n const xVals = backend.tensorMap.get(input.dataId).values as TypedArray;\n switch (reduceType) {\n case 'max':\n const outValues = maxImplCPU(\n xVals, util.sizeFromShape(reduceShape), resOutShape, x.dtype);\n res = backend.makeTensorInfo(resOutShape, x.dtype, outValues);\n break;\n case 'prod':\n const {outVals, outShape, outDtype} =\n prodImplCPU(input.shape, input.dtype, xVals, axes);\n res = backend.makeTensorInfo(outShape, outDtype, outVals);\n break;\n default:\n throw new Error(\n `${reduceType} CPU implementation is not yet supported.`);\n }\n } else {\n const inSize = util.sizeFromShape(reduceShape);\n const xSize = util.sizeFromShape(input.shape);\n const batchSize = xSize / inSize;\n\n const reduceInfo = {windowSize: inSize, inSize, batchSize, outSize: 1};\n const dtype = reduceType === 'mean' ? 'float32' : sumOutType(x.dtype);\n const uniformData = [\n {type: 'int32', data: [inSize]},\n ];\n const program = new ReduceProgram(reduceInfo, reduceType);\n const reduced =\n backend.runWebGPUProgram(program, [input], dtype, uniformData);\n toDispose.push(reduced);\n\n res = reshape({inputs: {x: reduced}, attrs: {shape: resOutShape}, backend});\n }\n\n toDispose.forEach(t => backend.disposeData(t.dataId));\n\n return res;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Sum, SumAttrs, SumInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {reduce} from '../kernel_utils/reduce';\n\nexport function sum(\n args: {inputs: SumInputs, backend: WebGPUBackend, attrs: SumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n return reduce(x, axis, keepDims, 'sum', backend);\n}\n\nexport const sumConfig: KernelConfig = {\n kernelName: Sum,\n backendName: 'webgpu',\n kernelFunc: sum as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Einsum, EinsumAttrs, EinsumInputs, KernelConfig, KernelFunc, Tensor, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {multiplyKernelFunc} from './Multiply';\nimport {reshape} from './Reshape';\nimport {sum} from './Sum';\nimport {transpose} from './Transpose';\n\nexport function einsum(\n args: {inputs: EinsumInputs, backend: WebGPUBackend, attrs: EinsumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {equation} = attrs;\n const tensors = inputs as Tensor[];\n\n const {allDims, summedDims, idDims} =\n backend_util.decodeEinsumEquation(equation, tensors.length);\n backend_util.checkEinsumDimSizes(allDims.length, idDims, tensors);\n const {path, steps} = backend_util.getEinsumComputePath(summedDims, idDims);\n\n const nSteps = steps.length;\n let out: TensorInfo|null = null;\n let numDimsRemaining = allDims.length;\n const tensorsToDispose: TensorInfo[] = [];\n for (let i = 0; i < nSteps; ++i) {\n for (const idTerm of steps[i]) {\n const {permutationIndices: perm, expandDims: dimsToExpand} =\n backend_util.getEinsumPermutation(numDimsRemaining, idDims[idTerm]);\n let x: TensorInfo;\n if (backend_util.isIdentityPermutation(perm)) {\n x = tensors[idTerm];\n } else {\n x = transpose({inputs: {x: tensors[idTerm]}, backend, attrs: {perm}});\n tensorsToDispose.push(x);\n }\n const targetShape: number[] = x.shape.slice();\n for (let k = 0; k < dimsToExpand.length; ++k) {\n targetShape.splice(dimsToExpand[k], 0, 1);\n }\n\n if 
(!util.arraysEqual(x.shape, targetShape)) {\n x = reshape({inputs: {x}, backend, attrs: {shape: targetShape}});\n tensorsToDispose.push(x);\n }\n if (out === null) {\n out = x;\n } else {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n out =\n multiplyKernelFunc({inputs: {a: x, b: out}, backend}) as TensorInfo;\n tensorsToDispose.push(out);\n }\n }\n if (i < nSteps - 1) {\n if (path[i] >= 0) {\n out = sum({\n inputs: {x: out},\n backend,\n attrs: {\n axis: path[i] - (allDims.length - numDimsRemaining),\n keepDims: false\n }\n });\n tensorsToDispose.push(out);\n }\n numDimsRemaining--;\n }\n }\n\n // Clean up intermediate tensors.\n for (const tensorInfo of tensorsToDispose) {\n if (tensorInfo === out) {\n continue;\n }\n backend.disposeData(tensorInfo.dataId);\n }\n\n return out;\n}\n\nexport const einsumConfig: KernelConfig = {\n kernelName: Einsum,\n backendName: 'webgpu',\n kernelFunc: einsum as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Elu, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const elu = unaryKernelFunc({opType: UnaryOpType.ELU});\n\nexport const eluConfig: KernelConfig = {\n kernelName: Elu,\n backendName: 'webgpu',\n kernelFunc: elu\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Equal, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {equalImplCPU as cpuEqual} from '../kernel_utils/shared';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const equal = binaryKernelFunc(\n {opSnippet: BinaryOpType.EQUAL, dtype: 'bool', cpuKernelImpl: cpuEqual});\n\nexport const equalConfig: KernelConfig = {\n kernelName: Equal,\n backendName: 'webgpu',\n kernelFunc: equal\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Exp, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {expImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const exp = unaryKernelFunc({\n opType: UnaryOpType.EXP,\n cpuKernelImpl: expImplCPU,\n dtype: 'float32',\n});\n\nexport const expConfig: KernelConfig = {\n kernelName: Exp,\n backendName: 'webgpu',\n kernelFunc: exp\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ExpandDims, ExpandDimsAttrs, ExpandDimsInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {reshape} from './Reshape';\n\nexport function expandDims(args: {\n inputs: ExpandDimsInputs,\n attrs: ExpandDimsAttrs,\n backend: WebGPUBackend\n}): TensorInfo {\n const {inputs, attrs, backend} = args;\n const {dim} = attrs;\n const {input} = inputs;\n\n const inputRank = input.shape.length;\n const newShape = input.shape.slice();\n let $dim = dim;\n if (dim < 0) {\n // Negative value is counted from the tail of rank.\n util.assert(\n -(inputRank + 1) <= dim,\n () => `Axis must be in the interval [${- (inputRank + 1)}, ${\n inputRank}]`);\n $dim = inputRank + dim + 1;\n }\n newShape.splice($dim, 0, 1);\n\n return reshape({inputs: {x: input}, backend, attrs: {shape: newShape}});\n}\n\nexport const expandDimsConfig: KernelConfig = {\n kernelName: ExpandDims,\n backendName: 'webgpu',\n kernelFunc: expandDims as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Expm1, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {expm1ImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const expm1 =\n unaryKernelFunc({opType: UnaryOpType.EXPM1, cpuKernelImpl: expm1ImplCPU});\n\nexport const expm1Config: KernelConfig = {\n kernelName: Expm1,\n backendName: 'webgpu',\n kernelFunc: expm1\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class FillProgram implements WebGPUProgram {\n variableNames: string[] = [];\n outputShape: number[] = [];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n uniforms = 'value : f32,';\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n\n constructor(shape: number[]) {\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.shaderKey = 'fill';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n setOutputAtIndex(index, uniforms.value);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Fill, FillAttrs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {FillProgram} from '../fill_webgpu';\n\nexport function fill(args: {backend: WebGPUBackend, attrs: FillAttrs}):\n TensorInfo {\n const {backend, attrs} = args;\n const {shape, value} = attrs;\n let {dtype} = attrs;\n\n dtype = dtype || util.inferDtype(value);\n\n if (dtype === 'string') {\n // String type should be handled in CPU memory.\n const values = util.getArrayFromDType(dtype, util.sizeFromShape(shape));\n values.fill(value as string);\n return backend.makeTensorInfo(shape, dtype, values);\n } else {\n const program = new FillProgram(shape);\n const uniformData = [{type: 'float32', data: [value as number]}];\n return backend.runWebGPUProgram(program, [], dtype, uniformData);\n }\n}\n\nexport const fillConfig: KernelConfig = {\n kernelName: Fill,\n backendName: 'webgpu',\n kernelFunc: fill as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class FlipLeftRightProgram implements WebGPUProgram {\n outputShape: number[] = [];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n\n constructor(imageShape: [number, number, number, number]) {\n this.outputShape = imageShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.shaderKey = 'flipLeftRight';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let coordX = uniforms.xShape[2] - coords[2] - 1;\n let outputValue = getX(coords[0], coords[1], coordX, coords[3]);\n setOutputAtIndex(index, outputValue);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * 
Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tensor4D} from '@tensorflow/tfjs-core';\nimport {FlipLeftRight, FlipLeftRightInputs} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {FlipLeftRightProgram} from '../flip_left_right_webgpu';\n\nexport const flipLeftRightConfig: KernelConfig = {\n kernelName: FlipLeftRight,\n backendName: 'webgpu',\n kernelFunc: ({inputs, backend}) => {\n const {image} = inputs as FlipLeftRightInputs;\n const webgpuBackend = backend as WebGPUBackend;\n\n const program = new FlipLeftRightProgram((image as Tensor4D).shape);\n const output =\n webgpuBackend.runWebGPUProgram(program, [image], image.dtype);\n return output;\n }\n};\n","\n/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Floor, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {floorImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const floor =\n unaryKernelFunc({opType: UnaryOpType.FLOOR, cpuKernelImpl: floorImplCPU});\n\nexport const floorConfig: KernelConfig = {\n kernelName: Floor,\n backendName: 'webgpu',\n kernelFunc: floor\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FloorDiv, KernelConfig} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const floorDiv =\n binaryKernelFunc({opSnippet: BinaryOpType.INT_DIV, dtype: 'int32'});\n\nexport const floorDivConfig: KernelConfig = {\n kernelName: FloorDiv,\n backendName: 'webgpu',\n kernelFunc: floorDiv\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class FromPixelsProgram implements WebGPUProgram {\n outputShape: number[] = [0];\n shaderKey: string;\n workPerThread: number;\n dispatchLayout: {x: number[]};\n variableNames: string[] = [];\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] =\n [256, 1, 1]; // The empirical value.\n\n useImport: boolean;\n\n constructor(outputShape: number[], useImport = false) {\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.useImport = useImport;\n this.shaderKey = `fromPixels_${this.useImport}`;\n }\n\n getUserCode(): string {\n const textureLoad = this.useImport ?\n 'textureLoad(src, vec2(coords.yx));' :\n 'textureLoad(src, vec2(coords.yx), 0)';\n const textureType = this.useImport ? 'texture_external' : 'texture_2d';\n return `\n @binding(1) @group(0) var src: ${textureType};\n\n ${getMainHeaderAndGlobalIndexString()}\n let flatIndexBase = index * uniforms.numChannels;\n for (var i = 0; i < uniforms.numChannels; i = i + 1) {\n let flatIndex = flatIndexBase + i;\n if (flatIndex < uniforms.size) {\n let coords = getCoordsFromIndex(flatIndexBase);\n let values = ${textureLoad};\n result[flatIndex] = i32(floor(255.0 * values[i]));\n }\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use backend file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\nimport {FromPixels, FromPixelsAttrs, FromPixelsInputs, util} from '@tensorflow/tfjs-core';\nimport {backend_util, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {FromPixelsProgram} from '../from_pixels_webgpu';\n\ntype ExternalImage = HTMLCanvasElement|ImageBitmap|OffscreenCanvas;\n\nexport const fromPixelsConfig: KernelConfig = {\n kernelName: FromPixels,\n backendName: 'webgpu',\n kernelFunc: fromPixels as {} as KernelFunc,\n};\n\nlet fromPixels2DContext: CanvasRenderingContext2D;\n\nexport function fromPixels(args: {\n inputs: FromPixelsInputs,\n backend: WebGPUBackend,\n attrs: FromPixelsAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n let {pixels} = inputs;\n const {numChannels} = attrs;\n\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n\n const isVideo = typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement;\n const isImage = typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement;\n const isCanvas = (typeof (HTMLCanvasElement) !== 'undefined' &&\n pixels instanceof HTMLCanvasElement) ||\n (typeof (OffscreenCanvas) !== 'undefined' &&\n pixels instanceof OffscreenCanvas);\n const isImageBitmap =\n typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap;\n\n const [width, height] = isVideo ?\n [\n (pixels as HTMLVideoElement).videoWidth,\n (pixels as HTMLVideoElement).videoHeight\n ] :\n [pixels.width, pixels.height];\n const outShape = [height, width, numChannels];\n\n if (env().getBool('WEBGPU_USE_IMPORT')) {\n if (isVideo) {\n return fromPixelsExternalImage({\n externalImage: pixels as HTMLVideoElement,\n backend,\n attrs,\n outShape,\n useImport: true\n });\n }\n }\n\n if (isVideo || isImage) {\n if (fromPixels2DContext == null) {\n fromPixels2DContext = document.createElement('canvas').getContext('2d');\n }\n fromPixels2DContext.canvas.width = width;\n fromPixels2DContext.canvas.height = height;\n fromPixels2DContext.drawImage(\n pixels as HTMLVideoElement | HTMLImageElement, 0, 0, width, height);\n pixels = fromPixels2DContext.canvas;\n }\n\n if (isImageBitmap || isCanvas || isVideo || isImage) {\n return fromPixelsExternalImage({\n externalImage: pixels as HTMLCanvasElement | ImageBitmap,\n backend,\n attrs,\n outShape,\n useImport: false\n });\n }\n\n // TODO: Encoding should happen on GPU once we no longer have to download\n // image data to the CPU.\n const imageData = (pixels as ImageData | backend_util.PixelData).data;\n let pixelArray = imageData;\n if (numChannels != null && numChannels !== 4) {\n pixelArray = new Uint8Array(pixels.width * pixels.height * numChannels);\n\n const dataLength = imageData.length;\n let j = 0;\n for (let i = 0; 
i < dataLength; i++) {\n if (i % 4 < numChannels) {\n pixelArray[j++] = imageData[i];\n }\n }\n }\n\n const output = backend.makeTensorInfo(outShape, 'int32');\n\n const info = backend.tensorMap.get(output.dataId);\n info.values = new Int32Array(pixelArray);\n backend.maybeReleaseBuffer(output.dataId);\n\n backend.uploadToGPU(output.dataId);\n return output;\n}\n\nfunction fromPixelsExternalImage(args: {\n externalImage: ExternalImage|HTMLVideoElement,\n backend: WebGPUBackend,\n attrs: FromPixelsAttrs,\n outShape: number[],\n useImport: boolean\n}): TensorInfo {\n const {externalImage, backend, attrs, outShape, useImport} = args;\n const {numChannels} = attrs;\n\n const size = util.sizeFromShape(outShape);\n const strides = util.computeStrides(outShape);\n const program = new FromPixelsProgram(outShape, useImport);\n\n const uniformData = [\n {type: 'uint32', data: [size]}, {type: 'uint32', data: [numChannels]},\n {type: 'uint32', data: [...strides]},\n {type: 'uint32', data: [...program.dispatch]}\n ];\n\n const output = backend.runFromPixelsProgram(\n program, outShape, uniformData, useImport, externalImage);\n return output;\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class BatchNormProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[], y?: number[], z?: number[]};\n dispatch: [number, number, number];\n variableNames: string[];\n uniforms = 'varianceEpsilon : f32,';\n // This is an experimental value.\n workGroupSize: [number, number, number] = [128, 1, 1];\n offsetShape: number[]|null;\n scaleShape: number[]|null;\n varianceEpsilon: number;\n size = true;\n\n constructor(\n xShape: number[], meanShape: number[], varianceShape: number[],\n offsetShape: number[]|null, scaleShape: number[]|null) {\n this.variableNames = ['x', 'mean', 'variance'];\n backend_util.assertAndGetBroadcastShape(xShape, meanShape);\n backend_util.assertAndGetBroadcastShape(xShape, varianceShape);\n this.outputShape = xShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n if (offsetShape != null) {\n backend_util.assertAndGetBroadcastShape(xShape, offsetShape);\n this.variableNames.push('offset');\n }\n if (scaleShape != null) {\n backend_util.assertAndGetBroadcastShape(xShape, scaleShape);\n this.variableNames.push('scale');\n }\n this.offsetShape = offsetShape;\n this.scaleShape = scaleShape;\n this.shaderKey = 'batchNorm';\n }\n\n getUserCode(): string {\n let offsetSnippet = '0.0';\n if (this.offsetShape != null) 
{\n offsetSnippet = 'getOffsetByOutputIndex(index)';\n }\n\n let scaleSnippet = '1.0';\n if (this.scaleShape != null) {\n scaleSnippet = 'getScaleByOutputIndex(index)';\n }\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size)\n {\n let xValue = getXByOutputIndex(index);\n let meanValue = getMeanByOutputIndex(index);\n let varianValue = getVarianceByOutputIndex(index);\n let offsetValue = ${offsetSnippet};\n let scaleValue = ${scaleSnippet};\n let inv = scaleValue * inverseSqrt(varianValue + f32(uniforms.varianceEpsilon));\n setOutputAtIndex(index,dot(vec3(xValue, -meanValue, offsetValue), vec3(inv, inv, 1.0)));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FusedBatchNorm, FusedBatchNormAttrs, FusedBatchNormInputs, KernelConfig, Tensor} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {BatchNormProgram} from '../batchnorm_webgpu';\n\nexport const fusedBatchNormConfig: KernelConfig = {\n kernelName: FusedBatchNorm,\n backendName: 'webgpu',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {x, scale, offset, mean, variance} = inputs as FusedBatchNormInputs;\n const {varianceEpsilon} = attrs as unknown as FusedBatchNormAttrs;\n const webGPUBackend = backend as WebGPUBackend;\n const batchNormInputs = [x as Tensor, mean as Tensor, variance as Tensor];\n let offsetShape = null;\n if (offset != null) {\n offsetShape = offset.shape;\n batchNormInputs.push(offset as Tensor);\n }\n let scaleShape = null;\n if (scale != null) {\n scaleShape = scale.shape;\n batchNormInputs.push(scale as Tensor);\n }\n const program = new BatchNormProgram(\n x.shape, mean.shape, variance.shape, offsetShape, scaleShape);\n const uniformData = [{type: 'float32', data: [varianceEpsilon]}];\n return webGPUBackend.runWebGPUProgram(\n program, batchNormInputs, x.dtype, uniformData);\n }\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, FusedConv2D, FusedConv2DAttrs, FusedConv2DInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {conv2DImpl} from './Conv2D_impl';\n\nexport function fusedConv2d(args: {\n inputs: FusedConv2DInputs,\n attrs: FusedConv2DAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter, bias, preluActivationWeights} = inputs;\n const {\n strides,\n pad,\n dataFormat,\n dilations,\n dimRoundingMode,\n activation,\n leakyreluAlpha\n } = attrs;\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, dilations, pad,\n dimRoundingMode, false /* depthwise */, $dataFormat);\n\n return conv2DImpl({x, filter, convInfo, backend, bias, preluActivationWeights,\n leakyreluAlpha, activation});\n}\n\nexport const fusedConv2DConfig: KernelConfig = {\n kernelName: FusedConv2D,\n backendName: 'webgpu',\n kernelFunc: fusedConv2d as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, FusedDepthwiseConv2D, FusedDepthwiseConv2DAttrs, FusedDepthwiseConv2DInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {DepthwiseConv2D3x3Program} from '../depthwise_conv2d_3x3_webgpu';\nimport {DepthwiseConv2DProgram} from '../depthwise_conv2d_webgpu';\n\nexport function fusedDepthwiseConv2D(args: {\n inputs: FusedDepthwiseConv2DInputs,\n attrs: FusedDepthwiseConv2DAttrs,\n backend: WebGPUBackend\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter, bias, preluActivationWeights} = inputs;\n const {strides, pad, dilations, dimRoundingMode, activation, leakyreluAlpha} =\n attrs;\n\n let $dilations = dilations;\n if ($dilations == null) {\n $dilations = [1, 1];\n }\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, $dilations),\n () => 'Error in depthwiseConv2d: Either strides or dilations must be ' +\n `1. 
Got strides ${strides} and dilations '${$dilations}'`);\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, $dilations,\n pad, dimRoundingMode, true /* depthwise */);\n\n const programInputs: TensorInfo[] = [x, filter];\n\n const hasBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n\n if (hasBias) {\n programInputs.push(bias);\n }\n if (hasPreluActivationWeights) {\n programInputs.push(preluActivationWeights);\n }\n\n const dimensions = [\n {type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left]},\n {type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]},\n {type: 'int32', data: [convInfo.dilationHeight, convInfo.dilationWidth]},\n {type: 'int32', data: [convInfo.inHeight, convInfo.inWidth]}\n ];\n\n let program: DepthwiseConv2DProgram|DepthwiseConv2D3x3Program;\n // TODO: To see if we need to relax the limitation. Currently, it's only for\n // filter size 3x3.\n if (convInfo.batchSize === 1 && convInfo.inHeight === convInfo.outHeight &&\n convInfo.inWidth === convInfo.outWidth && convInfo.strideHeight === 1 &&\n convInfo.strideWidth === 1 &&\n convInfo.filterHeight === convInfo.filterWidth &&\n convInfo.inChannels === convInfo.outChannels &&\n convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 &&\n convInfo.filterHeight === 3 && convInfo.inChannels % 4 === 0) {\n program = new DepthwiseConv2D3x3Program(\n convInfo, hasBias, activation, hasPreluActivationWeights);\n } else {\n program = new DepthwiseConv2DProgram(\n convInfo, hasBias, activation, hasPreluActivationWeights);\n dimensions.push(\n {type: 'int32', data: [convInfo.filterHeight]},\n {type: 'int32', data: [convInfo.filterWidth]},\n {type: 'int32', data: [convInfo.outChannels / convInfo.inChannels]});\n }\n if (activation === 'leakyrelu') {\n dimensions.push({type: 'float32', data: [leakyreluAlpha]});\n program.uniforms += ' alpha : f32,';\n }\n const result =\n backend.runWebGPUProgram(program, programInputs, 'float32', dimensions);\n\n return result;\n}\n\nexport const fusedDepthwiseConv2DConfig: KernelConfig = {\n kernelName: FusedDepthwiseConv2D,\n backendName: 'webgpu',\n kernelFunc: fusedDepthwiseConv2D as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class GatherNDProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames: string[] = ['A', 'indices'];\n uniforms: string;\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n sliceDim: number;\n constructor(sliceDim: number, shape: number[]) {\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.shaderKey = `gathernd_${sliceDim}`;\n this.sliceDim = sliceDim;\n this.uniforms = `sliceDim : i32, strides : ${getCoordsDataType(sliceDim)},`;\n }\n\n getUserCode(): string {\n let strideString;\n if (this.sliceDim > 1) {\n strideString = 'uniforms.strides[j]';\n } else {\n strideString = 'uniforms.strides';\n }\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n var flattenIndex = 0;\n for (var j = 0; j < uniforms.sliceDim; j = j + 1) {\n let indexTemp = i32(round(getIndices(coords[0], j)));\n let strideNum = ${strideString};\n flattenIndex = flattenIndex + indexTemp * strideNum;\n }\n\n setOutputAtIndex(index, getA(flattenIndex, coords[1]));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, GatherNd, GatherNdInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {gatherNdImplCPU} from '../kernel_utils/shared';\n\nimport {GatherNDProgram} from '../gather_nd_webgpu';\nimport {reshape} from './Reshape';\n\nexport function gatherNd(\n args: {inputs: GatherNdInputs, backend: WebGPUBackend}): TensorInfo {\n const {inputs, backend} = args;\n const {params, indices} = inputs;\n\n const indicesShape = indices.shape;\n const sliceRank = indicesShape[indicesShape.length - 1];\n const paramsSize = util.sizeFromShape(params.shape);\n\n const [resultShape, numSlices, sliceSize, strides] =\n backend_util.prepareAndValidate(params, indices);\n\n const flattenIndices = reshape(\n {inputs: {x: indices}, backend, attrs: {shape: [numSlices, sliceRank]}});\n const flattenX = reshape({\n inputs: {x: params},\n backend,\n attrs: {shape: [(util.sizeFromShape(params.shape) / sliceSize), sliceSize]}\n });\n if (backend.shouldExecuteOnCPU([params, indices]) ||\n params.dtype === 'string') {\n const indicesData = backend.readSync(indices.dataId) as TypedArray;\n const paramsBuf = backend.bufferSync(params);\n const outValue = gatherNdImplCPU(\n indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize,\n strides, params.shape, paramsSize);\n\n return backend.makeTensorInfo(resultShape, params.dtype, outValue.values);\n }\n const program = new GatherNDProgram(sliceRank, [numSlices, sliceSize]);\n const uniformData =\n [{type: 'int32', data: [sliceRank]}, {type: 'int32', data: strides}];\n const res = backend.runWebGPUProgram(\n program, [flattenX, flattenIndices], flattenX.dtype, uniformData);\n\n const reshaped =\n reshape({inputs: {x: res}, backend, attrs: {shape: resultShape}});\n\n backend.disposeData(flattenIndices.dataId);\n backend.disposeData(flattenX.dataId);\n backend.disposeData(res.dataId);\n\n return reshaped;\n}\n\nexport const gatherNdConfig: KernelConfig = {\n kernelName: GatherNd,\n backendName: 'webgpu',\n kernelFunc: gatherNd as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class GatherProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames: string[] = ['A', 'indices'];\n workGroupSize: [number, number, number] = [64, 1, 1];\n aShape: number[];\n size = true;\n\n constructor(aShape: number[], outputShape: number[]) {\n this.outputShape = aShape.slice();\n this.aShape = aShape;\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.shaderKey = `gather`;\n }\n\n getUserCode(): string {\n const sourceCoords = getSourceCoords(this.aShape);\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let resRC = getCoordsFromIndex(index);\n let indexZ = i32(getIndices(resRC.x, resRC.z));\n let inBounds = select(0.0, 1.0, indexZ >= 0 && indexZ < uniforms.aShape[2]);\n setOutputAtIndex(index, inBounds * getA(${sourceCoords}));\n }\n }\n `;\n return userCode;\n }\n}\n\n// The input and output are always flattened into rank 4 tensors.\nfunction getSourceCoords(aShape: number[]): string {\n const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w'];\n const sourceCoords = [];\n for (let i = 0; i < aShape.length; i++) {\n if (i === 2) {\n sourceCoords.push('indexZ');\n } else {\n sourceCoords.push(`${currentCoords[i]}`);\n }\n }\n return sourceCoords.join();\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, buffer, GatherV2, GatherV2Attrs, GatherV2Inputs, KernelConfig, KernelFunc, Rank, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {gatherV2ImplCPU} from '../kernel_utils/shared';\n\nimport {GatherProgram} from '../gather_webgpu';\nimport {reshape} from './Reshape';\n\nexport function gatherV2(\n args:\n {inputs: GatherV2Inputs, backend: WebGPUBackend, attrs: GatherV2Attrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, indices} = inputs;\n const {axis, batchDims} = attrs;\n\n // Unlike WebGL, WebGPU won't check if index is out of bound by calling\n // backend.readSync() function in debug mode.\n const parsedAxis = util.parseAxisParam(axis, x.shape)[0];\n\n const shapeInfo = backend_util.segment_util.collectGatherOpShapeInfo(\n x, indices, parsedAxis, batchDims);\n\n const indicesSize = util.sizeFromShape(indices.shape);\n\n const toDispose = [];\n\n const flattenX = reshape({\n inputs: {x},\n backend,\n attrs: {\n shape: [\n shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize,\n shapeInfo.sliceSize\n ]\n }\n });\n\n const flattenIndex = reshape({\n inputs: {x: indices},\n backend,\n attrs: {shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize]}\n });\n\n toDispose.push(flattenX);\n toDispose.push(flattenIndex);\n\n const flattenOutputShape = [\n shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize,\n shapeInfo.sliceSize\n ];\n\n if (backend.shouldExecuteOnCPU([x, indices])) {\n const indicesBufferInfo = backend.tensorMap.get(flattenIndex.dataId);\n const indicesValues = indicesBufferInfo.values as TypedArray;\n const indicesBuf =\n buffer(flattenIndex.shape, flattenIndex.dtype, indicesValues) as\n TensorBuffer;\n const xBufferInfo = backend.tensorMap.get(flattenX.dataId);\n const xValues = xBufferInfo.values as TypedArray;\n const xBuf =\n buffer(flattenX.shape, flattenX.dtype, xValues) as TensorBuffer;\n const outBuf = gatherV2ImplCPU(xBuf, indicesBuf, flattenOutputShape);\n\n toDispose.forEach(t => backend.disposeData(t.dataId));\n\n return backend.makeTensorInfo(\n shapeInfo.outputShape, outBuf.dtype, outBuf.values as TypedArray);\n }\n\n const program = new GatherProgram(flattenX.shape, flattenOutputShape);\n const res = backend.runWebGPUProgram(\n program, [flattenX, flattenIndex], flattenX.dtype);\n toDispose.push(res);\n\n const reshaped = reshape(\n {inputs: {x: res}, backend, attrs: {shape: shapeInfo.outputShape}});\n toDispose.forEach(t => backend.disposeData(t.dataId));\n return reshaped;\n}\n\nexport const gatherV2Config: KernelConfig = {\n kernelName: GatherV2,\n backendName: 'webgpu',\n kernelFunc: gatherV2 as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Greater, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {greaterImplCPU as cpuGreater} from '../kernel_utils/shared';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const greater = binaryKernelFunc({\n opSnippet: BinaryOpType.GREATER,\n cpuKernelImpl: cpuGreater,\n dtype: 'bool',\n});\n\nexport const greaterConfig: KernelConfig = {\n kernelName: Greater,\n backendName: 'webgpu',\n kernelFunc: greater\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GreaterEqual, KernelConfig} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {greaterEqualImplCPU as cpuGreaterEqual} from '../kernel_utils/shared';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const greaterEqual = binaryKernelFunc({\n opSnippet: BinaryOpType.GREATER_EQUAL,\n dtype: 'bool',\n cpuKernelImpl: cpuGreaterEqual\n});\n\nexport const greaterEqualConfig: KernelConfig = {\n kernelName: GreaterEqual,\n backendName: 'webgpu',\n kernelFunc: greaterEqual\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LeakyRelu, LeakyReluAttrs, LeakyReluInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {UnaryOpType} from '../unary_op_util';\nimport {UnaryOpProgram} from '../unary_op_webgpu';\n\nexport function leakyRelu(args: {\n inputs: LeakyReluInputs,\n backend: WebGPUBackend,\n attrs: LeakyReluAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {alpha} = attrs;\n const uniformData = [{type: 'float32', data: [alpha]}];\n const program = new UnaryOpProgram(x.shape, UnaryOpType.LEAKYRELU);\n program.uniforms = 'alpha : f32,';\n return backend.runWebGPUProgram(program, [x], 'float32', uniformData);\n}\n\nexport const leakyReluConfig: KernelConfig = {\n kernelName: LeakyRelu,\n backendName: 'webgpu',\n kernelFunc: leakyRelu as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Less} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {lessImplCPU as cpuLess} from '../kernel_utils/shared';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const less = binaryKernelFunc(\n {opSnippet: BinaryOpType.LESS, dtype: 'bool', cpuKernelImpl: cpuLess});\n\nexport const lessConfig: KernelConfig = {\n kernelName: Less,\n backendName: 'webgpu',\n kernelFunc: less\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LessEqual} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {lessEqualImplCPU as cpuLessEqual} from '../kernel_utils/shared';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const lessEqual = binaryKernelFunc({\n opSnippet: BinaryOpType.LESS_EQUAL,\n dtype: 'bool',\n cpuKernelImpl: cpuLessEqual\n});\n\nexport const lessEqualConfig: KernelConfig = {\n kernelName: LessEqual,\n backendName: 'webgpu',\n kernelFunc: lessEqual\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Log} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {logImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const log =\n unaryKernelFunc({opType: UnaryOpType.LOG, cpuKernelImpl: logImplCPU});\n\nexport const logConfig: KernelConfig = {\n kernelName: Log,\n backendName: 'webgpu',\n kernelFunc: log\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LogicalAnd} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const logicalAnd = binaryKernelFunc({\n opSnippet: BinaryOpType.LOGICAL_AND,\n dtype: 'bool'\n});\n\nexport const logicalAndConfig: KernelConfig = {\n kernelName: LogicalAnd,\n backendName: 'webgpu',\n kernelFunc: logicalAnd\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LogicalNot} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const logicalNot = unaryKernelFunc({opType: UnaryOpType.LOGICAL_NOT});\n\nexport const logicalNotConfig: KernelConfig = {\n kernelName: LogicalNot,\n backendName: 'webgpu',\n kernelFunc: logicalNot\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Max, MaxAttrs, MaxInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {reduce} from '../kernel_utils/reduce';\n\nexport function max(\n args: {inputs: MaxInputs, backend: WebGPUBackend, attrs: MaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {reductionIndices, keepDims} = attrs;\n\n return reduce(x, reductionIndices, keepDims, 'max', backend);\n}\n\nexport const maxConfig: KernelConfig = {\n kernelName: Max,\n backendName: 'webgpu',\n kernelFunc: max as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Maximum} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {maximumImplCPU as cpuMaximum} from '../kernel_utils/shared';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const maximum = binaryKernelFunc({\n opSnippet: BinaryOpType.MAX,\n cpuKernelImpl: cpuMaximum,\n});\n\nexport const maximumConfig: KernelConfig = {\n kernelName: Maximum,\n backendName: 'webgpu',\n kernelFunc: maximum\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, KernelConfig, KernelFunc, MaxPool, MaxPoolAttrs, MaxPoolInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {identity} from './Identity';\nimport {Pool2DProgram} from '../pool2d_webgpu';\nimport {PoolWithFilterSizeEqualsOneProgram} from '../pool_filtersizeone_webgpu';\n\nexport function maxPool(\n args: {inputs: MaxPoolInputs, backend: WebGPUBackend, attrs: MaxPoolAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations = 1;\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n let program: Pool2DProgram|PoolWithFilterSizeEqualsOneProgram;\n const dimensions = [];\n if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1) {\n if (util.arraysEqual(convInfo.inShape, convInfo.outShape)) {\n return identity({inputs: {x}, backend});\n }\n program = new PoolWithFilterSizeEqualsOneProgram(convInfo);\n dimensions.push(\n {type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]});\n } else {\n program = new Pool2DProgram(convInfo, 'max');\n dimensions.push(\n {type: 'int32', data: [convInfo.strideHeight, convInfo.strideWidth]},\n {type: 'int32', data: [convInfo.padInfo.top, convInfo.padInfo.left]}, {\n type: 'int32',\n data: [convInfo.dilationHeight, convInfo.dilationWidth]\n },\n {type: 'int32', data: [convInfo.inHeight, convInfo.inWidth]}, {\n type: 'int32',\n data: [convInfo.effectiveFilterHeight, convInfo.effectiveFilterWidth]\n });\n }\n\n return backend.runWebGPUProgram(program, [x], x.dtype, dimensions);\n}\n\nexport const maxPoolConfig: KernelConfig = {\n kernelName: MaxPool,\n backendName: 'webgpu',\n kernelFunc: maxPool as {} as KernelFunc\n};\n","/**\r\n * @license\r\n * Copyright 2021 Google LLC. 
All Rights Reserved.\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n * =============================================================================\r\n */\r\n\r\nimport {KernelConfig, KernelFunc, Mean, MeanAttrs, MeanInputs, TensorInfo} from '@tensorflow/tfjs-core';\r\n\r\nimport {WebGPUBackend} from '../backend_webgpu';\r\nimport {reduce} from '../kernel_utils/reduce';\r\n\r\nexport function mean(\r\n args: {inputs: MeanInputs, attrs: MeanAttrs, backend: WebGPUBackend}):\r\n TensorInfo {\r\n const {inputs, backend, attrs} = args;\r\n const {x} = inputs;\r\n const {keepDims, axis} = attrs;\r\n\r\n return reduce(x, axis, keepDims, 'mean', backend);\r\n}\r\n\r\nexport const meanConfig: KernelConfig = {\r\n kernelName: Mean,\r\n backendName: 'webgpu',\r\n kernelFunc: mean as {} as KernelFunc\r\n};\r\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Min, MinAttrs, MinInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {reduce} from '../kernel_utils/reduce';\n\nexport function min(\n args: {inputs: MinInputs, backend: WebGPUBackend, attrs: MinAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n return reduce(x, axis, keepDims, 'min', backend);\n}\n\nexport const minConfig: KernelConfig = {\n kernelName: Min,\n backendName: 'webgpu',\n kernelFunc: min as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Minimum} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {minimumImplCPU as cpuMinimum} from '../kernel_utils/shared';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const minimum = binaryKernelFunc({\n opSnippet: BinaryOpType.MIN,\n cpuKernelImpl: cpuMinimum,\n});\n\nexport const minimumConfig: KernelConfig = {\n kernelName: Minimum,\n backendName: 'webgpu',\n kernelFunc: minimum\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class MirrorPadProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n uniforms = '';\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n workGroupSize: [number, number, number] = [64, 1, 1];\n xShape: number[];\n offset: number;\n size = true;\n\n constructor(\n xShape: number[], paddings: Array<[number, number]>,\n mode: 'reflect'|'symmetric') {\n this.outputShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.xShape = xShape;\n paddings.map((_, i) => {\n this.uniforms += ` pad${i} : vec2,`;\n });\n this.offset = mode === 'reflect' ? 0 : 1;\n this.shaderKey = `mirrorPad_${mode}`;\n }\n\n getUserCode(): string {\n const rank = this.xShape.length;\n // The length of paddings are same with the rank of the input tensor.\n const start = this.xShape.map((_, i) => `uniforms.pad${i}[0]`).join(',');\n const end = this.xShape\n .map(\n (_, i) => `uniforms.pad${i}[0] + uniforms.xShape${\n rank > 1 ? `[${i}]` : ''}`)\n .join(',');\n\n const shaderStart = rank === 1 ? 'start' : 'start[i]';\n const shaderEnd = rank === 1 ? 'end' : 'end[i]';\n const shaderOutC = rank === 1 ? 
'outC' : 'outC[i]';\n const dtype = getCoordsDataType(rank);\n const unpackedCoords = rank > 1 ?\n ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank) :\n 'coords';\n\n return `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let start = ${dtype}(${start});\n let end = ${dtype}(${end});\n var outC = getCoordsFromIndex(index);\n for (var i = 0; i < ${rank}; i = i + 1) {\n if (${shaderOutC} < ${shaderStart}) {\n ${shaderOutC} = ${shaderStart} * 2 - ${shaderOutC} - ${\n this.offset};\n } else if(${shaderOutC} >= ${shaderEnd}) {\n ${shaderOutC} = (${shaderEnd} - 1) * 2 - ${shaderOutC} + ${\n this.offset};\n }\n }\n let coords = outC - start;\n setOutputAtIndex(index, getX(${unpackedCoords}));\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, MirrorPad, MirrorPadAttrs, MirrorPadInputs} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {MirrorPadProgram} from '../mirror_pad_webgpu';\n\nexport const mirrorPadConfig: KernelConfig = {\n kernelName: MirrorPad,\n backendName: 'webgpu',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {x} = inputs as MirrorPadInputs;\n const {paddings, mode} = attrs as unknown as MirrorPadAttrs;\n const webGPUBackend = backend as WebGPUBackend;\n\n const uniformData = paddings.map(p => {\n return {type: 'int32', data: [p[0], p[1]]};\n });\n const program = new MirrorPadProgram(x.shape, paddings, mode);\n const output =\n webGPUBackend.runWebGPUProgram(program, [x], x.dtype, uniformData);\n\n return output;\n }\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Neg, NegInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {negImplCPU} from '../kernel_utils/shared';\n\nimport {UnaryOpType} from '../unary_op_util';\nimport {UnaryOpProgram} from '../unary_op_webgpu';\n\n// This doesn't use unaryKernelFunc because negImplCPU is not of type\n// SimpleUnaryKernelImplCPU.\nexport function neg(args: {inputs: NegInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (backend.shouldExecuteOnCPU([x])) {\n const xData = backend.tensorMap.get(x.dataId);\n const [outValues, newShape] =\n negImplCPU(xData.values as TypedArray, x.shape, x.dtype);\n return backend.makeTensorInfo(newShape, x.dtype, outValues);\n }\n\n const program = new UnaryOpProgram(x.shape, UnaryOpType.NEG);\n\n return backend.runWebGPUProgram(program, [x], x.dtype);\n}\n\nexport const negConfig: KernelConfig = {\n kernelName: Neg,\n backendName: 'webgpu',\n kernelFunc: neg as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV3, NonMaxSuppressionV3Attrs, NonMaxSuppressionV3Inputs, TypedArray} from '@tensorflow/tfjs-core';\nimport {WebGPUBackend} from '../backend_webgpu';\n\nexport function nonMaxSuppressionV3(args: {\n inputs: NonMaxSuppressionV3Inputs,\n backend: WebGPUBackend,\n attrs: NonMaxSuppressionV3Attrs\n}) {\n console.warn(\n 'tf.nonMaxSuppression() in webgpu locks the UI thread. 
' +\n 'Call tf.nonMaxSuppressionAsync() instead');\n\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold} = attrs;\n\n const boxesVals = backend.readSync(boxes.dataId) as TypedArray;\n const scoresVals = backend.readSync(scores.dataId) as TypedArray;\n\n const {selectedIndices} = kernel_impls.nonMaxSuppressionV3Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);\n\n return backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices));\n}\n\nexport const nonMaxSuppressionV3Config: KernelConfig = {\n kernelName: NonMaxSuppressionV3,\n backendName: 'webgpu',\n kernelFunc: nonMaxSuppressionV3 as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV5, NonMaxSuppressionV5Attrs, NonMaxSuppressionV5Inputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nexport type TypedArray = Float32Array|Int32Array|Uint8Array;\n\nexport function nonMaxSuppressionV5(args: {\n inputs: NonMaxSuppressionV5Inputs,\n backend: WebGPUBackend,\n attrs: NonMaxSuppressionV5Attrs\n}): [TensorInfo, TensorInfo] {\n console.warn(\n 'tf.nonMaxSuppression() in webgpu locks the UI thread. ' +\n 'Call tf.nonMaxSuppressionAsync() instead');\n\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma} = attrs;\n\n const boxesVals = backend.readSync(boxes.dataId) as TypedArray;\n const scoresVals = backend.readSync(scores.dataId) as TypedArray;\n\n const maxOutputSizeVal = maxOutputSize;\n const iouThresholdVal = iouThreshold;\n const scoreThresholdVal = scoreThreshold;\n const softNmsSigmaVal = softNmsSigma;\n\n const {selectedIndices, selectedScores} =\n kernel_impls.nonMaxSuppressionV5Impl(\n boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal,\n scoreThresholdVal, softNmsSigmaVal);\n\n return [\n backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices)),\n backend.makeTensorInfo(\n [selectedScores.length], 'float32', new Float32Array(selectedScores))\n ];\n}\n\nexport const nonMaxSuppressionV5Config: KernelConfig = {\n kernelName: NonMaxSuppressionV5,\n backendName: 'webgpu',\n kernelFunc: nonMaxSuppressionV5 as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, ZerosLike, ZerosLikeInputs} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {complex} from './Complex';\nimport {fill} from './Fill';\nimport {imag} from './Imag';\nimport {real} from './Real';\n\nexport function zerosLike(\n args: {inputs: ZerosLikeInputs, backend: WebGPUBackend}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const r = zerosLike({inputs: {x: realPart}, backend});\n const imagPart = imag({inputs: {input: x}, backend});\n const i = zerosLike({inputs: {x: imagPart}, backend});\n\n const result = complex({inputs: {real: r, imag: i}, backend});\n\n backend.disposeData(realPart.dataId);\n backend.disposeData(r.dataId);\n backend.disposeData(imagPart.dataId);\n backend.disposeData(i.dataId);\n\n return result;\n } else {\n return fill({\n attrs: {\n shape: x.shape,\n dtype: x.dtype,\n value: x.dtype === 'string' ? '' : 0\n },\n backend\n });\n }\n}\n\nexport const zerosLikeConfig: KernelConfig = {\n kernelName: ZerosLike,\n backendName: 'webgpu',\n kernelFunc: zerosLike as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, OnesLike, OnesLikeInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {complex} from './Complex';\nimport {fill} from './Fill';\nimport {imag} from './Imag';\nimport {real} from './Real';\nimport {zerosLike} from './ZerosLike';\n\nexport function onesLike(\n args: {inputs: OnesLikeInputs, backend: WebGPUBackend}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (x.dtype === 'string') {\n throw new Error('onesLike is not supported under string dtype');\n } else if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const r = onesLike({inputs: {x: realPart}, backend});\n const imagPart = imag({inputs: {input: x}, backend});\n const i = zerosLike({inputs: {x: imagPart}, backend});\n\n const result = complex({inputs: {real: r, imag: i}, backend});\n\n backend.disposeData(realPart.dataId);\n backend.disposeData(r.dataId);\n backend.disposeData(imagPart.dataId);\n backend.disposeData(i.dataId);\n\n return result;\n } else {\n return fill({attrs: {shape: x.shape, dtype: x.dtype, value: 1}, backend});\n }\n}\n\nexport const onesLikeConfig: KernelConfig = {\n kernelName: OnesLike,\n backendName: 'webgpu',\n kernelFunc: onesLike as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Pack, PackAttrs, PackInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {concat} from './Concat';\nimport {expandDims} from './ExpandDims';\n\nexport function pack(\n args: {inputs: PackInputs, backend: WebGPUBackend, attrs: PackAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {axis} = attrs;\n\n if (inputs.length === 1) {\n return expandDims(\n {inputs: {input: inputs[0]}, backend, attrs: {dim: axis}});\n }\n\n const shape = inputs[0].shape;\n const dtype = inputs[0].dtype;\n\n inputs.forEach(t => {\n util.assertShapesMatch(\n shape, t.shape,\n 'All tensors passed to stack must have matching shapes');\n util.assert(\n dtype === t.dtype,\n () => 'All tensors passed to stack must have matching dtypes');\n });\n\n const intermediateTensorInfos: TensorInfo[] = [];\n const expandedTensors = inputs.map(t => {\n const expandedT =\n expandDims({inputs: {input: t}, backend, attrs: {dim: axis}});\n intermediateTensorInfos.push(expandedT);\n return expandedT;\n });\n\n const result = concat({inputs: expandedTensors, backend, attrs: {axis}});\n\n intermediateTensorInfos.forEach(t => backend.disposeData(t.dataId));\n\n return result;\n}\n\nexport const packConfig: KernelConfig = {\n kernelName: Pack,\n backendName: 'webgpu',\n kernelFunc: pack as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class PadProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n uniforms = 'constantValue : f32,';\n workGroupSize: [number, number, number] = [64, 1, 1];\n xShape: number[];\n size = true;\n\n constructor(xShape: number[], paddings: Array<[number, number]>) {\n this.outputShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */);\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n paddings.map((_, i) => {\n this.uniforms += ` pad${i} : vec2,`;\n });\n this.xShape = xShape;\n this.shaderKey = 'pad';\n }\n\n getUserCode(): string {\n const rank = this.xShape.length;\n const type = getCoordsDataType(rank);\n // The length of paddings are same with the rank of the input tensor.\n const start = this.xShape.map((_, i) => `uniforms.pad${i}[0]`).join(',');\n const end = this.xShape\n .map(\n (_, i) => `uniforms.pad${i}[0] + uniforms.xShape${\n rank > 1 ? `[${i}]` : ''}`)\n .join(',');\n const startValue = rank > 1 ? `${type}(${start})` : `${start}`;\n const endValue = rank > 1 ? `${type}(${end})` : `${end}`;\n\n const leftPadCondition = rank > 1 ? `any(outC < start)` : `outC < start`;\n const rightPadCondition = rank > 1 ? `any(outC >= end)` : `outC >= end`;\n\n const unpackedCoords = rank > 1 ?\n ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank) :\n 'coords';\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let start = ${startValue};\n let end = ${endValue};\n let outC = getCoordsFromIndex(index);\n\n if (${leftPadCondition} || ${rightPadCondition}) {\n setOutputAtIndex(index, uniforms.constantValue);\n } else {\n let coords = outC - start;\n setOutputAtIndex(index, getX(${unpackedCoords}));\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, PadV2, PadV2Attrs, PadV2Inputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {identity} from './Identity';\nimport {PadProgram} from '../pad_webgpu';\nimport {fill} from './Fill';\n\nexport const padV2 =\n (args: {inputs: PadV2Inputs,\n backend: WebGPUBackend,\n attrs: PadV2Attrs}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {paddings, constantValue} = attrs;\n if (paddings.every(p => util.arraysEqual(p, [0, 0]))) {\n return identity({inputs: {x}, backend});\n }\n if (util.sizeFromShape(x.shape) === 0) {\n // Short-circuit the computation, since x doesn't have value, only\n // the shape is used to compute output shape to pad.\n const outputShape = paddings.map(\n (p, i) =>\n p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */);\n return fill({\n backend,\n attrs: {shape: outputShape, value: constantValue, dtype: x.dtype}\n });\n }\n const uniformData = [{type: 'float32', data: [constantValue]}];\n paddings.map(p => uniformData.push({type: 'int32', data: [p[0], p[1]]}));\n const program = new PadProgram(x.shape, paddings);\n return backend.runWebGPUProgram(program, [x], x.dtype, uniformData);\n };\n\nexport const padV2Config: KernelConfig = {\n kernelName: PadV2,\n backendName: 'webgpu',\n kernelFunc: padV2 as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Pow} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const pow = binaryKernelFunc({\n opSnippet: BinaryOpType.POW,\n});\n\nexport const powConfig: KernelConfig = {\n kernelName: Pow,\n backendName: 'webgpu',\n kernelFunc: pow\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Prelu, PreluInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {BinaryOpType} from '../binary_op_util';\nimport {BinaryOpProgram} from '../binary_op_webgpu';\n\nexport function prelu(args: {inputs: PreluInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x, alpha} = inputs;\n\n const program = new BinaryOpProgram(BinaryOpType.PRELU, x.shape, alpha.shape);\n return backend.runWebGPUProgram(program, [x, alpha], 'float32');\n}\n\nexport const preluConfig: KernelConfig = {\n kernelName: Prelu,\n backendName: 'webgpu',\n kernelFunc: prelu as {} as KernelFunc\n};\n","/**\r\n * @license\r\n * Copyright 2021 Google LLC. All Rights Reserved.\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n * =============================================================================\r\n */\r\n\r\nimport {KernelConfig, KernelFunc, Prod, ProdAttrs, ProdInputs, TensorInfo} from '@tensorflow/tfjs-core';\r\n\r\nimport {WebGPUBackend} from '../backend_webgpu';\r\nimport {reduce} from '../kernel_utils/reduce';\r\n\r\nexport function prod(\r\n args: {inputs: ProdInputs, backend: WebGPUBackend, attrs: ProdAttrs}):\r\n TensorInfo {\r\n const {inputs, backend, attrs} = args;\r\n const {x} = inputs;\r\n const {axis, keepDims} = attrs;\r\n\r\n return reduce(x, axis, keepDims, 'prod', backend);\r\n}\r\n\r\nexport const prodConfig: KernelConfig = {\r\n kernelName: Prod,\r\n backendName: 'webgpu',\r\n kernelFunc: prod as {} as KernelFunc\r\n};\r\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Range, RangeAttrs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {rangeImplCPU} from '../kernel_utils/shared';\n\nexport const range =\n (args: {backend: WebGPUBackend, attrs: RangeAttrs}): TensorInfo => {\n const {backend, attrs} = args;\n const {start, stop, step, dtype} = attrs;\n const values = rangeImplCPU(start, stop, step, dtype);\n return backend.makeTensorInfo([values.length], dtype, values);\n };\n\nexport const rangeConfig: KernelConfig = {\n kernelName: Range,\n backendName: 'webgpu',\n kernelFunc: range as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RealDiv} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const realDiv = binaryKernelFunc({opSnippet: BinaryOpType.DIV});\n\nexport const realDivConfig: KernelConfig = {\n kernelName: RealDiv,\n backendName: 'webgpu',\n kernelFunc: realDiv as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Relu} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const relu = unaryKernelFunc({opType: UnaryOpType.RELU});\n\nexport const reluConfig: KernelConfig = {\n kernelName: Relu,\n backendName: 'webgpu',\n kernelFunc: relu\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Relu6} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const relu6 = unaryKernelFunc({opType: UnaryOpType.RELU6});\n\nexport const relu6Config: KernelConfig = {\n kernelName: Relu6,\n backendName: 'webgpu',\n kernelFunc: relu6\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ResizeBilinearProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n uniforms = 'adjustHeightWidth : vec2, halfPixelCenters : f32,';\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n\n constructor(\n inputShape: [number, number, number, number], newHeight: number,\n newWidth: number) {\n this.outputShape = [inputShape[0], newHeight, newWidth, inputShape[3]];\n\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.shaderKey = `resizeBilinear`;\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let b = coords[0];\n let d = coords[3];\n let rc = coords.yz;\n\n let effectiveInSize = vec2(\n f32(uniforms.xShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.xShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveOutSize = vec2(\n f32(uniforms.outShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.outShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveInputOverOutputRatioRC =\n effectiveInSize / effectiveOutSize;\n\n // Fractional source index\n let sourceFracIndexRC =\n (vec2(rc) + vec2(uniforms.halfPixelCenters)) *\n effectiveInputOverOutputRatioRC - vec2(uniforms.halfPixelCenters);\n\n // Compute the four integer indices.\n let sourceFloorRC = vec2(sourceFracIndexRC);\n let sourceCeilRC = vec2(\n min(vec2(uniforms.xShape.yz) - vec2(1.0), ceil(sourceFracIndexRC)));\n\n let topLeft = getX(b, sourceFloorRC.x, sourceFloorRC.y, d);\n let bottomLeft = getX(b, sourceCeilRC.x, sourceFloorRC.y, d);\n let topRight = getX(b, sourceFloorRC.x, sourceCeilRC.y, d);\n let bottomRight = getX(b, sourceCeilRC.x, sourceCeilRC.y, d);\n\n let fracRC = sourceFracIndexRC - vec2(sourceFloorRC);\n\n let top = topLeft + (topRight - topLeft) * fracRC.y;\n let bottom = bottomLeft + (bottomRight - bottomLeft) * fracRC.y;\n let newValue = top + (bottom - top) * fracRC.x;\n\n setOutputAtIndex(index, newValue);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeBilinear, ResizeBilinearAttrs, ResizeBilinearInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {ResizeBilinearProgram} from '../resize_bilinear_webgpu';\n\nexport function resizeBilinear(args: {\n inputs: ResizeBilinearInputs,\n backend: WebGPUBackend,\n attrs: ResizeBilinearAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images} = inputs;\n const {alignCorners, size, halfPixelCenters} = attrs;\n\n const [newHeight, newWidth] = size;\n const adjustHeight = alignCorners && newHeight > 1 ? 1.0 : 0.0;\n const adjustWidth = alignCorners && newWidth > 1 ? 1.0 : 0.0;\n const halfPixelCentersValue = halfPixelCenters ? 0.5 : 0.0;\n const uniformData = [\n {type: 'float32', data: [adjustHeight, adjustWidth]},\n {type: 'float32', data: [halfPixelCentersValue]}\n ];\n\n const program = new ResizeBilinearProgram(\n images.shape as [number, number, number, number], newHeight, newWidth);\n\n return backend.runWebGPUProgram(program, [images], 'float32', uniformData);\n}\n\nexport const resizeBilinearConfig: KernelConfig = {\n kernelName: ResizeBilinear,\n backendName: 'webgpu',\n kernelFunc: resizeBilinear as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ResizeNearestNeighborProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n uniforms = 'adjustHeightWidth : vec2, roundBase : f32,';\n workGroupSize: [number, number, number] = [64, 1, 1];\n halfPixelCenters: boolean;\n size = true;\n\n constructor(\n inputShape: [number, number, number, number], newHeight: number,\n newWidth: number, halfPixelCenters: boolean) {\n this.outputShape = [inputShape[0], newHeight, newWidth, inputShape[3]];\n\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.halfPixelCenters = halfPixelCenters;\n this.shaderKey = `resizeNearest_${halfPixelCenters}`;\n }\n\n getUserCode(): string {\n let sourceFracIndexRC: string;\n if (this.halfPixelCenters) {\n sourceFracIndexRC =\n `max((vec2(rc) + vec2(0.5)) * effectiveInputOverOutputRatioRC` +\n `, vec2(0.0))`;\n } else {\n sourceFracIndexRC = `vec2(rc) * effectiveInputOverOutputRatioRC`;\n }\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let b = coords[0];\n let d = coords[3];\n let rc = coords.yz;\n\n let effectiveInSize = vec2(\n f32(uniforms.xShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.xShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveOutSize = vec2(\n f32(uniforms.outShape.y) - uniforms.adjustHeightWidth[0],\n f32(uniforms.outShape.z) - uniforms.adjustHeightWidth[1]);\n\n let effectiveInputOverOutputRatioRC =\n effectiveInSize / effectiveOutSize;\n\n // Fractional source index\n let sourceFracIndexRC = ${sourceFracIndexRC};\n\n // Compute the coordinators of nearest neighbor point.\n let inputShapeRC = vec2(f32(uniforms.xShape.y), f32(uniforms.xShape.z));\n let sourceNearestRC = vec2(\n min(inputShapeRC - 1.0, floor(sourceFracIndexRC + uniforms.roundBase)));\n let newValue = getX(b, sourceNearestRC.x, sourceNearestRC.y, d);\n\n setOutputAtIndex(index, newValue);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeNearestNeighbor, ResizeNearestNeighborAttrs, ResizeNearestNeighborInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {ResizeNearestNeighborProgram} from '../resize_nearest_neighbor_webgpu';\n\nexport function resizeNearestNeighbor(args: {\n inputs: ResizeNearestNeighborInputs,\n backend: WebGPUBackend,\n attrs: ResizeNearestNeighborAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images} = inputs;\n const {alignCorners, halfPixelCenters, size} = attrs;\n\n const [newHeight, newWidth] = size;\n const adjustHeight = alignCorners && newHeight > 1 ? 1.0 : 0.0;\n const adjustWidth = alignCorners && newWidth > 1 ? 1.0 : 0.0;\n // When align corners is false, we rounds the value with floor.\n const roundBase = alignCorners ? 0.5 : 0.0;\n const uniformData = [\n {type: 'float32', data: [adjustHeight, adjustWidth]},\n {type: 'float32', data: [roundBase]}\n ];\n\n const program = new ResizeNearestNeighborProgram(\n images.shape as [number, number, number, number], newHeight, newWidth,\n halfPixelCenters);\n return backend.runWebGPUProgram(program, [images], images.dtype, uniformData);\n}\n\nexport const resizeNearestNeighborConfig: KernelConfig = {\n kernelName: ResizeNearestNeighbor,\n backendName: 'webgpu',\n kernelFunc: resizeNearestNeighbor as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class RotateProgram implements WebGPUProgram {\n outputShape: number[] = [];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x'];\n uniforms: string;\n workGroupSize: [number, number, number] = [64, 1, 1];\n fillSnippet: string;\n size = true;\n\n constructor(\n imageShape: [number, number, number, number],\n fillValue: number|[number, number, number]) {\n this.outputShape = imageShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.uniforms = `centerX : f32, centerY : f32, sinRadians : f32,\n cosRadians : f32,`;\n this.shaderKey = 'rotate';\n this.outputShape = imageShape;\n\n if (typeof fillValue === 'number') {\n this.uniforms += ` fillValue : f32,`;\n this.fillSnippet = `var outputValue = uniforms.fillValue;`;\n this.shaderKey += '_float';\n } else {\n this.uniforms += ` fillValue : vec3,`;\n this.fillSnippet = `var outputValue = uniforms.fillValue[coords[3]];`;\n this.shaderKey += '_vec3';\n }\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n let coordXFloat = (f32(coords[2]) - uniforms.centerX) *\n uniforms.cosRadians - (f32(coords[1]) - uniforms.centerY) *\n uniforms.sinRadians;\n let coordYFloat = (f32(coords[2]) - uniforms.centerX) *\n uniforms.sinRadians + (f32(coords[1]) - uniforms.centerY) *\n uniforms.cosRadians;\n let coordX = i32(round(coordXFloat + uniforms.centerX));\n let coordY = i32(round(coordYFloat + uniforms.centerY));\n ${this.fillSnippet}\n if(coordX >= 0 && coordX < uniforms.xShape[2] && coordY >= 0 &&\n coordY < uniforms.xShape[1]) {\n outputValue = getX(coords[0], coordY, coordX, coords[3]);\n }\n setOutputAtIndex(index, outputValue);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, Tensor4D} from '@tensorflow/tfjs-core';\nimport {RotateWithOffset, RotateWithOffsetAttrs, RotateWithOffsetInputs} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {RotateProgram} from '../rotate_webgpu';\n\nexport const rotateWithOffsetConfig: KernelConfig = {\n kernelName: RotateWithOffset,\n backendName: 'webgpu',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {image} = inputs as RotateWithOffsetInputs;\n const {radians, fillValue, center} = attrs as {} as RotateWithOffsetAttrs;\n const webgpuBackend = backend as WebGPUBackend;\n\n const program = new RotateProgram((image as Tensor4D).shape, fillValue);\n const [centerX, centerY] =\n backend_util.getImageCenter(center, image.shape[1], image.shape[2]);\n const uniformData = [\n {type: 'float32', data: [centerX]},\n {type: 'float32', data: [centerY]},\n {type: 'float32', data: [Math.sin(radians)]},\n {type: 'float32', data: [Math.cos(radians)]}\n ];\n\n if (typeof fillValue === 'number') {\n uniformData.push(\n {type: 'float32', data: [Number.parseFloat(fillValue.toFixed(2))]});\n } else {\n uniformData.push({type: 'float32', data: fillValue});\n }\n\n const output = webgpuBackend.runWebGPUProgram(\n program, [image], image.dtype, uniformData);\n return output;\n }\n };\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Rsqrt} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {rsqrtImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const rsqrt =\n unaryKernelFunc({opType: UnaryOpType.RSQRT, cpuKernelImpl: rsqrtImplCPU});\n\nexport const rsqrtConfig: KernelConfig = {\n kernelName: Rsqrt,\n backendName: 'webgpu',\n kernelFunc: rsqrt\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType} from '@tensorflow/tfjs-core';\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ScatterOptimizedProgram implements WebGPUProgram {\n variableNames = ['updates', 'indices'];\n uniforms: string;\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n updatesRank: number;\n indicesRank: number;\n sliceDimGreaterThanOne: boolean;\n atomic = true;\n type: DataType;\n\n constructor(\n flattenXShape: number[], sliceDim: number, indicesRank: number,\n updatesRank: number, strides: number[], shape: number[],\n outputDtype: DataType) {\n this.outputShape = shape;\n this.type = outputDtype;\n this.dispatchLayout = flatDispatchLayout(flattenXShape);\n // Dispatching based on |updates| shape instead of output shape.\n this.dispatch =\n computeDispatch(this.dispatchLayout, flattenXShape, this.workGroupSize);\n this.sliceDimGreaterThanOne = sliceDim > 1;\n this.shaderKey = `scatter_${indicesRank}_${updatesRank}_${\n this.sliceDimGreaterThanOne}_${outputDtype}`;\n const stridesType = getCoordsDataType(strides.length);\n this.uniforms = `sliceDim : i32, strides: ${stridesType}, size: i32,`;\n this.updatesRank = updatesRank;\n this.indicesRank = indicesRank;\n }\n\n getUserCode(): string {\n let indicesString = '';\n if (this.indicesRank === 1) {\n indicesString = 'coords[0]';\n } else if (this.indicesRank === 2) {\n indicesString = 'coords[0], j';\n }\n const indicesSnippet = `getIndices(${indicesString})`;\n\n const strideString = this.sliceDimGreaterThanOne ? 'uniforms.strides[j]' :\n 'uniforms.strides';\n\n let updatesString = '';\n let outCoordsString = '';\n let getUpdatesCoordsFromFlatIndex = '';\n if (this.updatesRank === 1) {\n updatesString = 'coords[0]';\n outCoordsString = 'flattenedIndex';\n getUpdatesCoordsFromFlatIndex = `\n fn getUpdatesCoordsFromFlatIndex(index : i32) -> i32 {\n return index;\n }\n `;\n } else if (this.updatesRank === 2) {\n updatesString = 'coords[0], coords[1]';\n outCoordsString = 'vec2(flattenedIndex, coords[1])';\n getUpdatesCoordsFromFlatIndex = `\n fn getUpdatesCoordsFromFlatIndex(index : i32) -> vec2 {\n let d0 = index / uniforms.updatesShape[1];\n let d1 = index - d0 * uniforms.updatesShape[1];\n return vec2(d0, d1);\n }\n `;\n }\n const updatesSnippet = `getUpdates(${updatesString})`;\n\n // atomicAdd only supports uint/int type. 
For float, we use\n // atomicCompareExchangeWeak to simulate.\n const atomicAddSnippet = this.type === 'int32' ?\n `atomicAdd(&(result[flatIndex]), i32(updateValue));` :\n `\n var assumed = atomicLoad(&(result[flatIndex]));\n var success = 0;\n for (; success == 0;) {\n let new = bitcast(assumed) + updateValue;\n let newI32 = bitcast(new);\n let resValue = atomicCompareExchangeWeak(&(result[flatIndex]), assumed, newI32);\n assumed = resValue[0];\n success = resValue[1];\n }\n `;\n\n const userCode = `\n ${getUpdatesCoordsFromFlatIndex}\n\n ${getMainHeaderAndGlobalIndexString()}\n\n if (index < uniforms.size) {\n let coords = getUpdatesCoordsFromFlatIndex(index);\n var flattenedIndex = 0;\n for (var j = 0; j < uniforms.sliceDim; j = j + 1) {\n let indexInside = i32(round(${indicesSnippet}));\n flattenedIndex = flattenedIndex + indexInside * ${strideString};\n }\n let updateValue = ${updatesSnippet};\n let flatIndex = getOutputIndexFromCoords(${outCoordsString});\n\n ${atomicAddSnippet}\n }\n }`;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, ScatterNd, ScatterNdAttrs, ScatterNdInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {fill} from './Fill';\nimport {reshape} from './Reshape';\nimport {ScatterOptimizedProgram} from '../scatter_optimized_webgpu';\n\nexport function scatterNd(args: {\n inputs: ScatterNdInputs,\n backend: WebGPUBackend,\n attrs: ScatterNdAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {indices, updates} = inputs;\n const {shape} = attrs;\n\n const {sliceRank, numUpdates, sliceSize, strides, outputSize} =\n backend_util.calculateShapes(updates, indices, shape);\n\n const flattenShape = [outputSize / sliceSize, sliceSize];\n\n if (outputSize === 0) {\n return backend.makeTensorInfo(shape, indices.dtype);\n }\n\n const flattenIndices = reshape(\n {inputs: {x: indices}, backend, attrs: {shape: [numUpdates, sliceRank]}});\n const flattenX = reshape(\n {inputs: {x: updates}, backend, attrs: {shape: [numUpdates, sliceSize]}});\n\n const type = flattenX.dtype;\n const output =\n fill({backend, attrs: {shape: flattenShape, value: 0, dtype: type}});\n const size = util.sizeFromShape(flattenX.shape);\n const uniformData = [\n {type: 'int32', data: [sliceRank]}, {type: 'int32', data: strides},\n {type: 'int32', data: [size]}\n ];\n const program = new ScatterOptimizedProgram(\n flattenX.shape, sliceRank, flattenIndices.shape.length,\n flattenX.shape.length, strides, flattenShape, type);\n const res = backend.runWebGPUProgram(\n program, [flattenX, flattenIndices], type, uniformData, output);\n\n const reshaped = reshape({inputs: {x: res}, backend, attrs: {shape}});\n\n backend.disposeData(flattenIndices.dataId);\n 
backend.disposeData(flattenX.dataId);\n backend.disposeData(res.dataId);\n\n return reshaped;\n}\n\nexport const scatterNdConfig: KernelConfig = {\n kernelName: ScatterNd,\n backendName: 'webgpu',\n kernelFunc: scatterNd as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class SelectProgram implements WebGPUProgram {\n variableNames = ['c', 'a', 'b'];\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n cRank: number;\n rank: number;\n size = true;\n\n constructor(cRank: number, shape: number[], rank: number) {\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n\n this.cRank = cRank;\n this.rank = rank;\n this.shaderKey = 'select';\n }\n\n getUserCode(): string {\n // TODO(WGSL): below code can be merged with getUserCode.\n let cCoords;\n let abCoords;\n if (this.rank > 4) {\n throw Error(`Where for rank ${this.rank} is not yet supported`);\n }\n\n if (this.rank === 1) {\n abCoords = `resRC`;\n cCoords = `resRC`;\n } else {\n const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w'];\n const cCoordVars = [];\n const abCoordVars = [];\n for (let i = 0; i < this.outputShape.length; i++) {\n abCoordVars.push(`${currentCoords[i]}`);\n if (i < this.cRank) {\n cCoordVars.push(`${currentCoords[i]}`);\n }\n }\n cCoords = cCoordVars.join();\n abCoords = abCoordVars.join();\n }\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let resRC = getCoordsFromIndex(index);\n let cVal = getC(${cCoords});\n if (cVal >= 1.0) {\n setOutputAtIndex(index, getA(${abCoords}));\n } else {\n setOutputAtIndex(index, getB(${abCoords}));\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Select, SelectInputs, TensorInfo, upcastType} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {SelectProgram} from '../select_webgpu';\n\nexport function select(args: {inputs: SelectInputs, backend: WebGPUBackend}):\n TensorInfo {\n const {inputs, backend} = args;\n const {condition, t, e} = inputs;\n\n const program =\n new SelectProgram(condition.shape.length, t.shape, t.shape.length);\n return backend.runWebGPUProgram(\n program, [condition, t, e], upcastType(t.dtype, e.dtype));\n}\n\nexport const selectConfig: KernelConfig = {\n kernelName: Select,\n backendName: 'webgpu',\n kernelFunc: select as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sigmoid} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const sigmoid = unaryKernelFunc({opType: UnaryOpType.SIGMOID});\n\nexport const sigmoidConfig: KernelConfig = {\n kernelName: Sigmoid,\n backendName: 'webgpu',\n kernelFunc: sigmoid,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sin} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const sin = unaryKernelFunc({opType: UnaryOpType.SIN});\n\nexport const sinConfig: KernelConfig = {\n kernelName: Sin,\n backendName: 'webgpu',\n kernelFunc: sin\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sinh} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const sinh = unaryKernelFunc({opType: UnaryOpType.SINH});\n\nexport const sinhConfig: KernelConfig = {\n kernelName: Sinh,\n backendName: 'webgpu',\n kernelFunc: sinh\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sub} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {subImplCPU as cpuSub} from '../kernel_utils/shared';\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const sub = binaryKernelFunc({\n opSnippet: BinaryOpType.SUB,\n cpuKernelImpl: cpuSub,\n supportsComplex: true\n});\n\nexport const subConfig: KernelConfig = {\n kernelName: Sub,\n backendName: 'webgpu',\n kernelFunc: sub\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Softmax, SoftmaxAttrs, SoftmaxInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {exp} from './Exp';\nimport {max} from './Max';\nimport {realDiv} from './RealDiv';\nimport {reshape} from './Reshape';\nimport {sub} from './Sub';\nimport {sum} from './Sum';\n\nexport function softmax(\n args: {inputs: SoftmaxInputs, backend: WebGPUBackend, attrs: SoftmaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {logits} = inputs;\n const {dim} = attrs;\n\n const axes = util.parseAxisParam([dim], logits.shape);\n\n const maxLogit = max({\n inputs: {x: logits},\n backend,\n attrs: {reductionIndices: axes, keepDims: false}\n });\n\n const expandedShape = backend_util.expandShapeToKeepDim(maxLogit.shape, axes);\n\n const maxLogitsReshaped =\n reshape({inputs: {x: maxLogit}, backend, attrs: {shape: expandedShape}});\n const a =\n sub({inputs: {a: logits, b: maxLogitsReshaped}, backend}) as TensorInfo;\n const b = exp({inputs: {x: a}, backend}) as TensorInfo;\n const sumExp =\n sum({inputs: {x: b}, backend, attrs: {axis: axes, keepDims: false}});\n const sumExpReshaped =\n reshape({inputs: {x: sumExp}, backend, attrs: {shape: expandedShape}});\n const res =\n realDiv({inputs: {a: b, b: sumExpReshaped}, backend}) as TensorInfo;\n\n backend.disposeData(maxLogit.dataId);\n backend.disposeData(maxLogitsReshaped.dataId);\n backend.disposeData(a.dataId);\n backend.disposeData(b.dataId);\n backend.disposeData(sumExp.dataId);\n backend.disposeData(sumExpReshaped.dataId);\n\n return res;\n}\n\nexport const softmaxConfig: KernelConfig = {\n kernelName: Softmax,\n backendName: 'webgpu',\n kernelFunc: softmax as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, SpaceToBatchND, SpaceToBatchNDAttrs, SpaceToBatchNDInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {padV2} from './PadV2';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport const spaceToBatchND = (args: {\n inputs: SpaceToBatchNDInputs,\n backend: WebGPUBackend,\n attrs: SpaceToBatchNDAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockShape, paddings} = attrs;\n\n util.assert(\n x.shape.length <= 4,\n () => 'spaceToBatchND for rank > 4 with a WebGPU backend not ' +\n 'implemented yet');\n\n const prod = blockShape.reduce((a, b) => a * b);\n\n const completePaddings: Array<[number, number]> = [[0, 0]];\n completePaddings.push(...paddings as Array<[number, number]>);\n for (let i = 1 + blockShape.length; i < x.shape.length; ++i) {\n completePaddings.push([0, 0]);\n }\n\n const toDispose = [];\n\n const paddedX = padV2({\n inputs: {x},\n backend,\n attrs: {paddings: completePaddings, constantValue: 0}\n });\n\n const reshapedPaddedShape =\n backend_util.getReshaped(paddedX.shape, blockShape, prod, false);\n\n const permutedReshapedPaddedPermutation = backend_util.getPermuted(\n reshapedPaddedShape.length, blockShape.length, false);\n\n const flattenShape =\n backend_util.getReshapedPermuted(paddedX.shape, blockShape, prod, false);\n\n const reshapedPaddedX = reshape(\n {inputs: {x: paddedX}, backend, attrs: {shape: reshapedPaddedShape}});\n\n const paddedXT = transpose({\n inputs: {x: reshapedPaddedX},\n backend,\n attrs: {perm: permutedReshapedPaddedPermutation}\n });\n\n const result =\n reshape({inputs: {x: paddedXT}, backend, attrs: {shape: flattenShape}});\n\n toDispose.push(paddedX);\n toDispose.push(reshapedPaddedX);\n toDispose.push(paddedXT);\n\n toDispose.forEach(t => backend.disposeData(t.dataId));\n\n return result;\n};\n\nexport const spaceToBatchNDConfig: KernelConfig = {\n kernelName: SpaceToBatchND,\n backendName: 'webgpu',\n kernelFunc: spaceToBatchND as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class ScatterProgram implements WebGPUProgram {\n variableNames = ['updates', 'indices', 'defaultValue'];\n uniforms: string;\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n workPerThread = 4;\n size = true;\n indicesSnippet: string;\n strideString: string;\n updatesSnippet: string;\n\n constructor(\n updateSize: number, sliceDim: number, indicesRank: number,\n updatesRank: number, strides: number[], shape: number[],\n summingDupeIndex = true) {\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n const sliceDimGreaterThanOne = sliceDim > 1;\n this.shaderKey =\n `scatter_${indicesRank}_${updatesRank}_${sliceDimGreaterThanOne}`;\n const stridesType = getCoordsDataType(strides.length);\n this.uniforms =\n `updateSize : i32, sliceDim : i32, strides: ${stridesType},`;\n let indicesString = '';\n if (indicesRank === 1) {\n indicesString = 'i';\n } else if (indicesRank === 2) {\n indicesString = 'i, j';\n }\n this.indicesSnippet = `getIndices(${indicesString})`;\n\n let updatesString = '';\n if (updatesRank === 1) {\n updatesString = 'i';\n } else if (updatesRank === 2) {\n updatesString = 'i, coords[1]';\n }\n this.updatesSnippet = `getUpdates(${updatesString})`;\n\n this.strideString =\n sliceDimGreaterThanOne ? 
'uniforms.strides[j]' : 'uniforms.strides';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n\n let globalIndex = index * ${this.workPerThread};\n if (globalIndex < uniforms.size) {\n var sum = vec4(0.0);\n var found = vec4(false);\n for (var i = 0; i < uniforms.updateSize; i = i + 1) {\n var flattenedIndex = 0;\n for (var j = 0; j < uniforms.sliceDim; j = j + 1) {\n let indexInside = i32(round(${this.indicesSnippet}));\n flattenedIndex = flattenedIndex + indexInside * ${\n this.strideString};\n }\n for (var innerIndex = 0; innerIndex < ${\n this.workPerThread}; innerIndex = innerIndex + 1) {\n let curIndex = globalIndex + innerIndex;\n let coords = getCoordsFromIndex(curIndex);\n if (flattenedIndex == coords[0]) {\n sum[innerIndex] = sum[innerIndex] + ${this.updatesSnippet};\n found[innerIndex] = true;\n }\n }\n }\n for (var innerIndex = 0; innerIndex < ${\n this.workPerThread}; innerIndex = innerIndex + 1) {\n let curIndex = globalIndex + innerIndex;\n if (curIndex < uniforms.size)\n {\n setOutputAtIndex(curIndex, mix(getDefaultValue(), sum[innerIndex], f32(found[innerIndex])));\n }\n }\n }\n }`;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, SparseToDense, SparseToDenseAttrs, SparseToDenseInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {reshape} from './Reshape';\nimport {ScatterProgram} from '../scatter_webgpu';\n\nexport function sparseToDense(args: {\n inputs: SparseToDenseInputs,\n backend: WebGPUBackend,\n attrs: SparseToDenseAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {sparseIndices, sparseValues, defaultValue} = inputs;\n const {outputShape} = attrs;\n\n const {sliceRank, numUpdates, strides, outputSize} =\n backend_util.calculateShapes(sparseValues, sparseIndices, outputShape);\n\n const sumDupeIndices = false;\n const uniformData = [\n {type: 'int32', data: [numUpdates]},\n {type: 'int32', data: [sliceRank]},\n {type: 'int32', data: strides},\n ];\n const program = new ScatterProgram(\n numUpdates, sliceRank, sparseIndices.shape.length,\n sparseValues.shape.length, strides, [outputSize, 1], sumDupeIndices);\n\n const res = backend.runWebGPUProgram(\n program, [sparseValues, sparseIndices, defaultValue], sparseValues.dtype,\n uniformData);\n\n const reshaped =\n reshape({inputs: {x: res}, backend, attrs: {shape: outputShape}});\n\n backend.disposeData(res.dataId);\n return reshaped;\n}\n\nexport const sparseToDenseConfig: KernelConfig = {\n kernelName: SparseToDense,\n backendName: 'webgpu',\n kernelFunc: sparseToDense as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, SplitV, SplitVAttrs, SplitVInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {slice} from './Slice';\n\nexport function splitV(\n args: {inputs: SplitVInputs, backend: WebGPUBackend, attrs: SplitVAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {numOrSizeSplits, axis} = attrs;\n\n const $axis = util.parseAxisParam(axis, x.shape)[0];\n const splitSizes = backend_util.prepareSplitSize(x, numOrSizeSplits, $axis);\n\n const xRank = x.shape.length;\n const begin = new Array(xRank).fill(0);\n const size = x.shape.slice();\n\n return splitSizes.map(s => {\n const sliceSize = [...size];\n sliceSize[$axis] = s;\n const sliceT =\n slice({inputs: {x}, backend, attrs: {begin, size: sliceSize}});\n begin[$axis] += s;\n return sliceT;\n });\n}\n\nexport const splitVConfig: KernelConfig = {\n kernelName: SplitV,\n backendName: 'webgpu',\n kernelFunc: splitV as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sqrt} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const sqrt = unaryKernelFunc({opType: UnaryOpType.SQRT});\n\nexport const sqrtConfig: KernelConfig = {\n kernelName: Sqrt,\n backendName: 'webgpu',\n kernelFunc: sqrt\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Square, SquareInputs} from '@tensorflow/tfjs-core';\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {UnaryOpProgram} from '../unary_op_webgpu';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const squareConfig: KernelConfig = {\n kernelName: Square,\n backendName: 'webgpu',\n kernelFunc: ({inputs, backend}) => {\n const {x} = inputs as SquareInputs;\n const webGPUBackend = backend as WebGPUBackend;\n const program = new UnaryOpProgram(x.shape, UnaryOpType.SQUARE);\n return webGPUBackend.runWebGPUProgram(program, [x], x.dtype);\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SquaredDifference} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nimport {BinaryOpType} from '../binary_op_util';\n\nexport const squaredDifference = binaryKernelFunc({\n opSnippet: BinaryOpType.SQUARED_DIFFERENCE,\n});\n\nexport const squaredDifferenceConfig: KernelConfig = {\n kernelName: SquaredDifference,\n backendName: 'webgpu',\n kernelFunc: squaredDifference\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getCoordsDataType, getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class StridedSliceProgram implements WebGPUProgram {\n variableNames = ['x'];\n uniforms: string;\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n // TODO(xing.xu): Increase the workPerThread.\n workPerThread = 1;\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n\n constructor(destSize: number[]) {\n this.outputShape = destSize;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize,\n [this.workPerThread, 1, 1]);\n\n const dtype = getCoordsDataType(this.outputShape.length);\n this.uniforms = `begin : ${dtype}, strides : ${dtype}, `;\n this.shaderKey = 'stridedSlice';\n }\n\n getUserCode(): string {\n const rank = this.outputShape.length;\n let newCoords = '';\n if (rank === 1) {\n newCoords = 'coords * uniforms.strides + uniforms.begin';\n } else {\n let outputAxis = 0;\n newCoords =\n this.outputShape\n .map((_, i) => {\n outputAxis++;\n return this.outputShape.length === 1 ?\n `coords * uniforms.strides[${i}] + uniforms.begin[${i}]` :\n `coords[${outputAxis - 1}] * uniforms.strides[${\n i}] + uniforms.begin[${i}]`;\n })\n .join(',');\n }\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n setOutputAtIndex(index, getX(${newCoords}));\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, KernelConfig, KernelFunc, Rank, slice_util, StridedSlice, StridedSliceAttrs, StridedSliceInputs, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {stridedSliceImplCPU} from '../kernel_utils/shared';\n\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\nimport {StridedSliceProgram} from '../strided_slice_webgpu';\n\nexport function stridedSlice(args: {\n inputs: StridedSliceInputs,\n backend: WebGPUBackend,\n attrs: StridedSliceAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {\n begin,\n end,\n strides,\n beginMask,\n endMask,\n ellipsisMask,\n newAxisMask,\n shrinkAxisMask\n } = attrs;\n\n const {\n finalShapeSparse,\n finalShape,\n isIdentity,\n sliceDim0,\n isSimpleSlice,\n begin: $begin,\n end: $end,\n strides: $strides\n } =\n slice_util.sliceInfo(\n x.shape, begin, end, strides, beginMask, endMask, ellipsisMask,\n newAxisMask, shrinkAxisMask);\n\n let result;\n\n if (isIdentity) {\n // Optimization #1, slice is a no-op plus reshape\n result = reshape({inputs: {x}, backend, attrs: {shape: finalShape}});\n } else if (sliceDim0 || isSimpleSlice) {\n // Optimization #2, slice is memory contiguous (only occurs in dim 0)\n util.assert(\n x.shape.length >= 1,\n () => `Input must have rank at least 1, got: ${x.shape.length}`);\n\n const size = slice_util.computeOutShape($begin, $end, $strides);\n // To tolerate begin[0] > end[0] (a 0-output slice), we min(begin, end).\n const sliced = slice({inputs: {x}, backend, attrs: {begin: $begin, size}});\n result =\n reshape({inputs: {x: sliced}, backend, attrs: {shape: finalShape}});\n backend.disposeData(sliced.dataId);\n } else {\n const shouldExecuteOnCPU = backend.shouldExecuteOnCPU([x]);\n if (shouldExecuteOnCPU) {\n const values = backend.readSync(x.dataId) as TypedArray;\n const xBuf = buffer(x.shape, x.dtype, values) as TensorBuffer;\n const resultValues =\n stridedSliceImplCPU(finalShapeSparse, xBuf, $strides, $begin);\n result = backend.makeTensorInfo(finalShape, x.dtype, resultValues.values);\n } else {\n const program = new StridedSliceProgram(finalShapeSparse);\n const uniformData =\n [{type: 'int32', data: $begin}, {type: 'int32', data: $strides}];\n const resultValues =\n backend.runWebGPUProgram(program, [x], x.dtype, uniformData);\n result = reshape(\n {inputs: {x: resultValues}, backend, attrs: {shape: finalShape}});\n backend.disposeData(resultValues.dataId);\n }\n }\n\n return result;\n}\n\nexport const stridedSliceConfig: KernelConfig = {\n kernelName: StridedSlice,\n backendName: 'webgpu',\n kernelFunc: stridedSlice as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringNGrams, StringNGramsAttrs, StringNGramsInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {stringNGramsImplCPU} from '../kernel_utils/shared';\n\nexport function stringNGrams(args: {\n inputs: StringNGramsInputs,\n backend: WebGPUBackend,\n attrs: StringNGramsAttrs\n}): [TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {\n separator,\n nGramWidths,\n leftPad,\n rightPad,\n padWidth,\n preserveShortSequences\n } = attrs;\n const {data, dataSplits} = inputs;\n const $data = backend.readSync(data.dataId) as Uint8Array[];\n const $dataSplits = backend.readSync(dataSplits.dataId) as Int32Array;\n\n const [nGrams, nGramsSplits] = stringNGramsImplCPU(\n $data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth,\n preserveShortSequences);\n return [\n backend.makeTensorInfo([nGrams.length], 'string', nGrams),\n backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits),\n ];\n}\n\nexport const stringNGramsConfig: KernelConfig = {\n kernelName: StringNGrams,\n backendName: 'webgpu',\n kernelFunc: stringNGrams as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tanh} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {UnaryOpType} from '../unary_op_util';\n\nexport const tanh = unaryKernelFunc({opType: UnaryOpType.TANH});\n\nexport const tanhConfig: KernelConfig = {\n kernelName: Tanh,\n backendName: 'webgpu',\n kernelFunc: tanh\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nimport {WebGPUProgram} from './webgpu_program';\n\nexport class TileProgram implements WebGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n rank: number;\n\n constructor(aShape: number[], reps: number[]) {\n const outputShape: number[] = new Array(aShape.length);\n for (let i = 0; i < outputShape.length; i++) {\n outputShape[i] = aShape[i] * reps[i];\n }\n this.outputShape = outputShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.rank = this.outputShape.length;\n this.shaderKey = 'tile';\n }\n\n getUserCode(): string {\n const sourceCoords = getSourceCoords(this.rank, 'uniforms.');\n\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let resRC = getCoordsFromIndex(index);\n setOutputAtIndex(index, getA(${sourceCoords}));\n }\n }\n `;\n return userCode;\n }\n}\n\nfunction getSourceCoords(rank: number, uniformPrefix = ''): string {\n if (rank >= 5) {\n throw Error(`Tile for rank ${rank} is not yet supported`);\n }\n if (rank === 1) {\n return `(resRC % ${uniformPrefix}aShape)`;\n }\n\n const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w'];\n const sourceCoords = [];\n for (let i = 0; i < rank; i++) {\n sourceCoords.push(`(${currentCoords[i]} % ${uniformPrefix}aShape[${i}])`);\n }\n return sourceCoords.join();\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, KernelConfig, KernelFunc, TensorInfo, Tile, TileAttrs, TileInputs, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {tileImplCPU} from '../kernel_utils/shared';\nimport {TileProgram} from '../tile_webgpu';\n\nexport function tile(\n params: {inputs: TileInputs, backend: WebGPUBackend, attrs: TileAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = params;\n const {x} = inputs;\n const {reps} = attrs;\n\n // tile gpu program cannot handle rank >= 5 case.\n if (backend.shouldExecuteOnCPU([x]) || x.dtype === 'string' ||\n x.shape.length >= 5) {\n // Even thought string tensor is always on CPU, just to be consistent on how\n // to access tensor data.\n const data = backend.readSync(x.dataId);\n const value = x.dtype === 'string' ?\n (data as Uint8Array[]).map(d => util.decodeString(d)) :\n data as TypedArray;\n const buf = buffer(x.shape, x.dtype, value);\n const outBuf = tileImplCPU(buf, reps);\n return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);\n }\n\n const program = new TileProgram(x.shape, reps);\n const output = backend.runWebGPUProgram(program, [x], x.dtype);\n\n return output;\n}\n\nexport const tileConfig: KernelConfig = {\n kernelName: Tile,\n backendName: 'webgpu',\n kernelFunc: tile as {} as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\n// Based on Algorithm 2 of Bitonic Top K, ref:\n// https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf\n// The original algorithm is based on computing the top K only, however\n// since for TFJS we require the indices of the top K values as well then the\n// algorithm found here is a bit modified. 
Rather than producing the values\n// at each step, the indices containing the top K are generated instead.\n// The output values are not generated to reduce the number of outputs in the\n// GPU, the values can easily be retrieved from the indices using a gather\n// op.\n\nexport class SwapProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'indices'];\n uniforms: string;\n workGroupSize: [number, number, number] = [256, 1, 1];\n size = true;\n\n constructor(shape: number[]) {\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.uniforms = `inputSize : i32, firstPass : i32, negativeInf : f32,\n dir : i32, inc : i32,`;\n this.shaderKey = 'swap';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let outC = getCoordsFromIndex(index);\n let batch = outC[0];\n let elemIdx = outC[1];\n // We compare elements pair-wise within a group of size 2 * inc.\n // The comparing rule for each group alternates between ascending\n // and descending. Within each group, we compare each pair at\n // positions i and i+inc. To decide whether an element at position i\n // is x0 or x1, we mod it by 2 * inc, if the result is smaller than\n // inc, it is in the first half of the group, we denote it as x0,\n // otherwise we denote it as x1.\n // For example, as shown in the Bitonic top K paper referenced\n // above, Figure5(a) shows that element[1] is in the second half of\n // the group when group size is 2, but it is in the first half of\n // the group when group size is 4.\n let isFirstInPair = elemIdx % (2 * uniforms.inc) < uniforms.inc;\n var i = 0;\n if (isFirstInPair) {\n i = elemIdx;\n } else {\n i = elemIdx - uniforms.inc;\n }\n\n var i0 = 0;\n if (uniforms.firstPass == 1) {\n i0 = i;\n } else {\n i0 = i32(getIndices(batch, i));\n }\n\n var i1 = 0;\n if (uniforms.firstPass == 1) {\n i1 = i + uniforms.inc;\n } else {\n i1 = i32(getIndices(batch, i + uniforms.inc));\n }\n\n var x0 = f32(0.0);\n var x1 = f32(0.0);\n if (i0 < uniforms.inputSize) {\n x0 = getX(batch, i0);\n } else {\n x0 = uniforms.negativeInf;\n }\n if (i1 < uniforms.inputSize) {\n x1 = getX(batch, i1);\n } else {\n x1 = uniforms.negativeInf;\n }\n\n let reverse = elemIdx % (2 * uniforms.dir) >= uniforms.dir;\n let isGreater = x0 > x1 || (x0 == x1 && i1 > i0);\n if (reverse == isGreater) {\n // Elements in opposite order of direction\n let iTemp = i0;\n i0 = i1;\n i1 = iTemp;\n }\n if (isFirstInPair) {\n setOutputAtIndex(index, f32(i0));\n } else {\n setOutputAtIndex(index, f32(i1));\n }\n }\n }\n `;\n return userCode;\n }\n}\n\nexport class MergeProgram implements WebGPUProgram {\n outputShape: number[];\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n variableNames = ['x', 'indices'];\n uniforms: string;\n workGroupSize: [number, number, number] = [256, 1, 1];\n size = true;\n\n constructor(shape: number[]) {\n this.outputShape = shape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n // |n| Size of the original input of TopK\n // |firstPass| indicates if this is the first time swap is being used which\n // means no indices input containing the top K is 
present yet.\n // |k| Top k elements desired\n this.uniforms = `inputSize : i32, firstPass : i32, k : i32,`;\n this.shaderKey = 'merge';\n }\n\n getUserCode(): string {\n const userCode = `\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let outC = getCoordsFromIndex(index);\n let batch = outC[0];\n let elemIdx = outC[1];\n // The output size is half of the previous size.\n // If the previous sequence is | | | | _ _ _ _ | | | | _ _ _ _\n // (k=4), we only need to output the indices at positions |, the\n // indices at positions _ can be thrown away, see Figure5(b) After\n // Phase 2 (Merge phase) in the Bitonic Top K paper referenced\n // above.\n // For example, the paper shows we only need to output the orange\n // bars. The output sequence should look like this | | | | | | | |.\n // Because the sequence is halved, to map the output index back to\n // the previous sequence to find the corresponding value, we need\n // to double the index. When we double the index, we basically\n // interpolate a position, so 2i looks like\n // | _ | _ | _ | _ | _ | _ | _. We move the | to the first k\n // position of each 2k positions by - elemIdx % k. E.g. for output\n // at index 4,5,6,7, we want to get the corresponding element at\n // original index 8,9,10,11, for output at index 8,9,10,11,\n // we want to get the corresponding element at original index\n // 16,17,18,19, so on and so forth.\n\n var i = 0;\n if (elemIdx < uniforms.k) {\n i = elemIdx;\n } else {\n i = elemIdx * 2 - elemIdx % uniforms.k;\n }\n var i0 = 0;\n if (uniforms.firstPass == 1) {\n i0 = i;\n } else {\n i0 = i32(getIndices(batch, i));\n }\n var i1 = 0;\n if (uniforms.firstPass == 1) {\n i1 = i + uniforms.k;\n } else {\n i1 = i32(getIndices(batch, i + uniforms.k));\n }\n\n let x0 = getX(batch, i0);\n var x1 = f32(0.0);\n if (i1 < uniforms.inputSize) {\n x1 = getX(batch, i1);\n } else {\n x1 = x0;\n }\n\n if (x0 >= x1) {\n setOutputAtIndex(index, f32(i0));\n } else {\n setOutputAtIndex(index, f32(i1));\n }\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, NumericDataType, TensorInfo, TopK, TopKAttrs, TopKInputs, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {topKImplCPU} from '../kernel_utils/shared';\nimport {MergeProgram, SwapProgram} from '../top_k_webgpu';\nimport {fill} from './Fill';\nimport {gatherV2} from './GatherV2';\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\n\nfunction disposeIntermediateTensorInfoOrNull(\n backend: WebGPUBackend, tensorInfo: TensorInfo) {\n if (tensorInfo !== null) {\n backend.disposeData(tensorInfo.dataId);\n }\n}\n\nfunction roundUpToPow2(num: number) {\n let pow2 = 1;\n while (pow2 < num) {\n pow2 *= 2;\n }\n return pow2;\n}\n\n// Based on Algorithm 2 of Bitonic Top K, ref:\n// https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf\nexport function topK(\n args: {inputs: TopKInputs, backend: WebGPUBackend, attrs: TopKAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {k, sorted}= attrs;\n\n const xShape = x.shape;\n const lastDim = xShape[xShape.length - 1];\n\n if (backend.shouldExecuteOnCPU([x])) {\n const xVals = backend.readSync(x.dataId) as TypedArray;\n const [allTopKVals, allTopKIndices] =\n topKImplCPU(xVals, xShape, x.dtype as NumericDataType, k, sorted);\n\n return [\n backend.makeTensorInfo(\n allTopKVals.shape, allTopKVals.dtype, allTopKVals.values),\n backend.makeTensorInfo(\n allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values)\n ];\n }\n\n if (k === 0) {\n xShape[xShape.length - 1] = 0;\n return [\n backend.makeTensorInfo(xShape, x.dtype, []),\n backend.makeTensorInfo(xShape, 'int32', [])\n ];\n }\n\n if (lastDim === 1 /* firstPass */) {\n return [\n x, fill({attrs: {shape: xShape, dtype: 'int32', value: 0}, backend})\n ];\n }\n\n // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim.\n const xSize = util.sizeFromShape(xShape);\n const batch = xSize / lastDim;\n const x2D = reshape({inputs: {x}, attrs: {shape: [batch, lastDim]}, backend});\n\n const kPow2 = roundUpToPow2(k);\n const lastDimPow2 = roundUpToPow2(lastDim);\n\n // Only the indices containing the top K are kept at every step to reduce\n // number of outputs in the GPU algorithms, so once the final set of indices\n // is computed then gather is used to grab the corresponding values\n // from the original input.\n let indices: TensorInfo = null;\n\n // GPU algorithm always takes in an indices input but this input is not used\n // on the first run of a GPU algorithm, therefore if indices is null we simply\n // pass in x2D instead of it but the value will not actually be used\n const getInputs = () => indices === null ? 
[x2D, x2D] : [x2D, indices];\n\n const runSwap = (dir: number, inc: number, shape: number[]) => {\n const inputs = getInputs();\n const program = new SwapProgram(shape);\n const firstPass = indices === null ? 1 : 0;\n const uniformDataSwap = [\n {type: 'int32', data: [lastDim]},\n {type: 'int32', data: [firstPass]},\n {type: 'float32', data: [Number.NEGATIVE_INFINITY]},\n {type: 'int32', data: [dir]},\n {type: 'int32', data: [inc]}\n ];\n const prevIndices = indices;\n indices = backend.runWebGPUProgram(\n program, inputs, 'int32', uniformDataSwap);\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n };\n\n // Step 1: local sort\n for (let len = 1; len < kPow2; len *= 2) {\n const dir = len * 2;\n for (let inc = len; inc >= 1; inc /= 2) {\n runSwap(dir, inc, [batch, lastDimPow2]);\n }\n }\n\n // Step 2: merge\n for (let indicesSize = lastDimPow2; indicesSize > kPow2; indicesSize /= 2) {\n const inputs = getInputs();\n const mergeProgram = new MergeProgram([batch, indicesSize / 2]);\n const firstPass = indices === null ? 1 : 0;\n const uniformDataMerge = [\n {type: 'int32', data: [lastDim]},\n {type: 'int32', data: [firstPass]},\n {type: 'int32', data: [kPow2]}\n ];\n const prevIndices = indices;\n indices = backend.runWebGPUProgram(\n mergeProgram, inputs, 'int32', uniformDataMerge);\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n\n // Step 3: rebuild\n const len = kPow2 / 2;\n const dir = len * 2;\n for (let inc = len; inc >= 1; inc /= 2) {\n runSwap(dir, inc, indices.shape);\n }\n }\n\n // Keep only the requested top K results instead of kPow2\n let prevIndices = indices;\n indices = slice(\n {inputs: {x: indices}, backend, attrs: {begin: 0, size: [batch, k]}});\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n\n // Gather values on last dimension\n let values = gatherV2(\n {inputs: {x: x2D, indices}, backend, attrs: {axis: 1, batchDims: 1}});\n disposeIntermediateTensorInfoOrNull(backend, x2D);\n\n // Reshape back to the original input shape, except that the last\n // dimension is k.\n const newShape = xShape.slice(0, -1);\n newShape.push(k);\n\n prevIndices = indices;\n indices = reshape({inputs: {x: indices}, attrs: {shape: newShape}, backend});\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n\n const prevValues = values;\n values = reshape({inputs: {x: values}, attrs: {shape: newShape}, backend});\n disposeIntermediateTensorInfoOrNull(backend, prevValues);\n\n return [values, indices];\n}\n\nexport const topKConfig: KernelConfig = {\n kernelName: TopK,\n backendName: 'webgpu',\n kernelFunc: topK as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getMainHeaderAndGlobalIndexString} from './shader_preprocessor';\nimport {WebGPUProgram} from './webgpu_program';\nimport {computeDispatch, flatDispatchLayout} from './webgpu_util';\n\nexport class TransformProgram implements WebGPUProgram {\n variableNames = ['Image', 'Transforms'];\n outputShape: number[];\n uniforms = 'interpolationModeId : i32, fillModeId : i32, fillValue : f32,';\n shaderKey: string;\n dispatchLayout: {x: number[]};\n dispatch: [number, number, number];\n workGroupSize: [number, number, number] = [64, 1, 1];\n size = true;\n\n constructor(outShape: [number, number, number, number]) {\n this.outputShape = outShape;\n this.dispatchLayout = flatDispatchLayout(this.outputShape);\n this.dispatch = computeDispatch(\n this.dispatchLayout, this.outputShape, this.workGroupSize);\n this.shaderKey = 'transform';\n }\n\n getUserCode(): string {\n const userCode = `\n fn mapCoord(outCoord : f32, len : f32) -> f32{\n var inCoord = outCoord;\n if(uniforms.fillModeId == 2) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz2 = 2.0 * len;\n if (inCoord < sz2) {\n inCoord = sz2 * f32(i32(f32(-inCoord / sz2))) +\n inCoord;\n }\n if (inCoord < -len) {\n inCoord = inCoord + sz2;\n } else {\n inCoord = -inCoord - 1.0;\n }\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz2 = 2.0 * len;\n inCoord = inCoord - sz2 * f32(i32(f32(inCoord / sz2)));\n if (inCoord >= len) {\n inCoord = sz2 - inCoord - 1.0;\n }\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (uniforms.fillModeId == 3) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz = len - 1.0;\n inCoord = inCoord + len * (f32(i32(f32(-inCoord / sz))) + 1.0);\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n let sz = len - 1.0;\n inCoord = inCoord - len * f32(i32(f32(inCoord / sz)));\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (uniforms.fillModeId == 4) {\n return clamp(outCoord, 0.0, len - 1.0);\n }\n return outCoord;\n }\n fn readWithFillValue(batch : i32, coordY : i32, coordX : i32,\n channel : i32) -> f32 {\n var outputValue : f32;\n if (0 <= coordY && coordY < uniforms.imageShape[1] && 0 <= coordX && coordX < uniforms.imageShape[2]) {\n outputValue = getImage(batch, coordY, coordX, channel);\n } else {\n outputValue = uniforms.fillValue;\n }\n return outputValue;\n }\n\n ${getMainHeaderAndGlobalIndexString()}\n if (index < uniforms.size) {\n let coords = getCoordsFromIndex(index);\n var outputValue : f32;\n let batch = coords[0];\n let x = coords[2];\n let y = coords[1];\n let channel = coords[3];\n let xf = f32(x);\n let yf = f32(y);\n let a1 = getTransforms(batch, 0);\n let a2 = getTransforms(batch, 1);\n let a3 = getTransforms(batch, 2);\n let b1 = getTransforms(batch, 3);\n let b2 = 
getTransforms(batch, 4);\n let b3 = getTransforms(batch, 5);\n let c1 = getTransforms(batch, 6);\n let c2 = getTransforms(batch, 7);\n let projection = c1 * xf + c2 * yf + 1.0;\n if (projection == 0.0) {\n outputValue = uniforms.fillValue;\n } else {\n let inX = (a1 * xf + a2 * yf + a3) / projection;\n let inY = (b1 * xf + b2 * yf + b3) / projection;\n let mapX = mapCoord(inX, f32(uniforms.imageShape[2]));\n let mapY = mapCoord(inY, f32(uniforms.imageShape[1]));\n\n if (uniforms.interpolationModeId == 1) {\n let coordY = i32(round(mapY));\n let coordX = i32(round(mapX));\n outputValue = readWithFillValue(batch, coordY, coordX,\n channel);\n } else {\n let yFloor = floor(mapY);\n let xFloor = floor(mapX);\n let yCeil = yFloor + 1.0;\n let xCeil = xFloor + 1.0;\n let valueYFloor = (xCeil - mapX) *\n readWithFillValue(batch, i32(yFloor), i32(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, i32(yFloor), i32(xCeil), channel);\n let valueYCeil = (xCeil - mapX) *\n readWithFillValue(batch, i32(yCeil), i32(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, i32(yCeil), i32(xCeil), channel);\n outputValue = (yCeil - mapY) * valueYFloor +\n (mapY - yFloor) * valueYCeil;\n }\n }\n setOutputAtIndex(index, outputValue);\n }\n }\n `;\n return userCode;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Transform, TransformAttrs, TransformInputs} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\nimport {TransformProgram} from '../transform_webgpu';\n\nexport function transform(args: {\n inputs: TransformInputs,\n backend: WebGPUBackend,\n attrs: TransformAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {image, transforms} = inputs;\n const {interpolation, fillMode, fillValue, outputShape} = attrs;\n\n const [batch, imageHeight, imageWidth, numChannels] = image.shape;\n const [outHeight, outWidth] =\n outputShape != null ? outputShape : [imageHeight, imageWidth];\n const outShape =\n [batch, outHeight, outWidth,\n numChannels] as [number, number, number, number];\n\n const program = new TransformProgram(outShape);\n const interpolationModeId = interpolation === 'nearest' ? 
1 : 2;\n let fillModeId: number;\n switch (fillMode) {\n case 'constant':\n fillModeId = 1;\n break;\n case 'reflect':\n fillModeId = 2;\n break;\n case 'wrap':\n fillModeId = 3;\n break;\n case 'nearest':\n fillModeId = 4;\n break;\n default:\n fillModeId = 1;\n break;\n }\n const uniformData = [\n {type: 'int32', data: [interpolationModeId]},\n {type: 'int32', data: [fillModeId]}, {type: 'float32', data: [fillValue]}\n ];\n return backend.runWebGPUProgram(\n program, [image, transforms], 'float32', uniformData);\n}\n\nexport const transformConfig: KernelConfig = {\n kernelName: Transform,\n backendName: 'webgpu',\n kernelFunc: transform as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Unpack, UnpackAttrs, UnpackInputs} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from '../backend_webgpu';\n\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\n\nexport function unpack(\n args:\n {inputs: UnpackInputs, backend: WebGPUBackend, attrs: UnpackAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {value} = inputs;\n let {axis} = attrs;\n\n if (axis < 0) {\n axis += value.shape.length;\n }\n\n const x = value;\n const xRank = x.shape.length;\n\n const num = value.shape[axis];\n const outShape: number[] = new Array(xRank - 1);\n let outIndex = 0;\n for (let i = 0; i < xRank; i++) {\n if (i !== axis) {\n outShape[outIndex++] = x.shape[i];\n }\n }\n\n const toDispose = [];\n\n const begin = new Array(xRank).fill(0);\n const size = x.shape.slice();\n size[axis] = 1;\n const res: TensorInfo[] = new Array(num);\n for (let i = 0; i < res.length; i++) {\n begin[axis] = i;\n const sliced = slice({inputs: {x}, backend, attrs: {begin, size}});\n const reshaped =\n reshape({inputs: {x: sliced}, backend, attrs: {shape: outShape}});\n res[i] = reshaped;\n\n toDispose.push(sliced);\n }\n\n toDispose.forEach(t => backend.disposeData(t.dataId));\n return res;\n}\n\nexport const unpackConfig: KernelConfig = {\n kernelName: Unpack,\n backendName: 'webgpu',\n kernelFunc: unpack as {} as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {KernelConfig, registerKernel} from '@tensorflow/tfjs-core';\n\nimport {_fusedMatMulConfig} from './kernels/_FusedMatMul';\nimport {absConfig} from './kernels/Abs';\nimport {addConfig} from './kernels/Add';\nimport {addNConfig} from './kernels/AddN';\nimport {argMaxConfig} from './kernels/ArgMax';\nimport {argMinConfig} from './kernels/ArgMin';\nimport {avgPoolConfig} from './kernels/AvgPool';\nimport {batchMatMulConfig} from './kernels/BatchMatMul';\nimport {batchToSpaceNDConfig} from './kernels/BatchToSpaceND';\nimport {castConfig} from './kernels/Cast';\nimport {ceilConfig} from './kernels/Ceil';\nimport {clipByValueConfig} from './kernels/ClipByValue';\nimport {complexConfig} from './kernels/Complex';\nimport {concatConfig} from './kernels/Concat';\nimport {conv2DConfig} from './kernels/Conv2D';\nimport {conv2DBackpropInputConfig} from './kernels/Conv2DBackpropInput';\nimport {cosConfig} from './kernels/Cos';\nimport {coshConfig} from './kernels/Cosh';\nimport {cropAndResizeConfig} from './kernels/CropAndResize';\nimport {cumprodConfig} from './kernels/Cumprod';\nimport {cumsumConfig} from './kernels/Cumsum';\nimport {depthToSpaceConfig} from './kernels/DepthToSpace';\nimport {depthwiseConv2dNativeConfig} from './kernels/DepthwiseConv2dNative';\nimport {einsumConfig} from './kernels/Einsum';\nimport {eluConfig} from './kernels/Elu';\nimport {equalConfig} from './kernels/Equal';\nimport {expConfig} from './kernels/Exp';\nimport {expandDimsConfig} from './kernels/ExpandDims';\nimport {expm1Config} from './kernels/Expm1';\nimport {fillConfig} from './kernels/Fill';\nimport {flipLeftRightConfig} from './kernels/FlipLeftRight';\nimport {floorConfig} from './kernels/Floor';\nimport {floorDivConfig} from './kernels/FloorDiv';\nimport {fromPixelsConfig} from './kernels/FromPixels';\nimport {fusedBatchNormConfig} from './kernels/FusedBatchNorm';\nimport {fusedConv2DConfig} from './kernels/FusedConv2D';\nimport {fusedDepthwiseConv2DConfig} from './kernels/FusedDepthwiseConv2D';\nimport {gatherNdConfig} from './kernels/GatherNd';\nimport {gatherV2Config} from './kernels/GatherV2';\nimport {greaterConfig} from './kernels/Greater';\nimport {greaterEqualConfig} from './kernels/GreaterEqual';\nimport {identityConfig} from './kernels/Identity';\nimport {imagConfig} from './kernels/Imag';\nimport {leakyReluConfig} from './kernels/LeakyRelu';\nimport {lessConfig} from './kernels/Less';\nimport {lessEqualConfig} from './kernels/LessEqual';\nimport {logConfig} from './kernels/Log';\nimport {logicalAndConfig} from './kernels/LogicalAnd';\nimport {logicalNotConfig} from './kernels/LogicalNot';\nimport {maxConfig} from './kernels/Max';\nimport {maximumConfig} from './kernels/Maximum';\nimport {maxPoolConfig} from './kernels/MaxPool';\nimport {meanConfig} from './kernels/Mean';\nimport {minConfig} from './kernels/Min';\nimport 
{minimumConfig} from './kernels/Minimum';\nimport {mirrorPadConfig} from './kernels/MirrorPad';\nimport {multiplyConfig} from './kernels/Multiply';\nimport {negConfig} from './kernels/Neg';\nimport {nonMaxSuppressionV3Config} from './kernels/NonMaxSuppressionV3';\nimport {nonMaxSuppressionV5Config} from './kernels/NonMaxSuppressionV5';\nimport {notEqualConfig} from './kernels/NotEqual';\nimport {onesLikeConfig} from './kernels/OnesLike';\nimport {packConfig} from './kernels/Pack';\nimport {padV2Config} from './kernels/PadV2';\nimport {powConfig} from './kernels/Pow';\nimport {preluConfig} from './kernels/Prelu';\nimport {prodConfig} from './kernels/Prod';\nimport {rangeConfig} from './kernels/Range';\nimport {realConfig} from './kernels/Real';\nimport {realDivConfig} from './kernels/RealDiv';\nimport {reluConfig} from './kernels/Relu';\nimport {relu6Config} from './kernels/Relu6';\nimport {reshapeConfig} from './kernels/Reshape';\nimport {resizeBilinearConfig} from './kernels/ResizeBilinear';\nimport {resizeNearestNeighborConfig} from './kernels/ResizeNearestNeighbor';\nimport {rotateWithOffsetConfig} from './kernels/RotateWithOffset';\nimport {rsqrtConfig} from './kernels/Rsqrt';\nimport {scatterNdConfig} from './kernels/ScatterNd';\nimport {selectConfig} from './kernels/Select';\nimport {sigmoidConfig} from './kernels/Sigmoid';\nimport {sinConfig} from './kernels/Sin';\nimport {sinhConfig} from './kernels/Sinh';\nimport {sliceConfig} from './kernels/Slice';\nimport {softmaxConfig} from './kernels/Softmax';\nimport {spaceToBatchNDConfig} from './kernels/SpaceToBatchND';\nimport {sparseToDenseConfig} from './kernels/SparseToDense';\nimport {splitVConfig} from './kernels/SplitV';\nimport {sqrtConfig} from './kernels/Sqrt';\nimport {squareConfig} from './kernels/Square';\nimport {squaredDifferenceConfig} from './kernels/SquaredDifference';\nimport {stridedSliceConfig} from './kernels/StridedSlice';\nimport {stringNGramsConfig} from './kernels/StringNGrams';\nimport {subConfig} from './kernels/Sub';\nimport {sumConfig} from './kernels/Sum';\nimport {tanhConfig} from './kernels/Tanh';\nimport {tileConfig} from './kernels/Tile';\nimport {topKConfig} from './kernels/TopK';\nimport {transformConfig} from './kernels/Transform';\nimport {transposeConfig} from './kernels/Transpose';\nimport {unpackConfig} from './kernels/Unpack';\nimport {zerosLikeConfig} from './kernels/ZerosLike';\n\n// List all kernel configs here\nconst kernelConfigs: KernelConfig[] = [\n _fusedMatMulConfig,\n absConfig,\n addConfig,\n addNConfig,\n argMaxConfig,\n argMinConfig,\n avgPoolConfig,\n batchMatMulConfig,\n batchToSpaceNDConfig,\n castConfig,\n ceilConfig,\n clipByValueConfig,\n complexConfig,\n concatConfig,\n conv2DConfig,\n conv2DBackpropInputConfig,\n cosConfig,\n coshConfig,\n cropAndResizeConfig,\n cumprodConfig,\n cumsumConfig,\n depthToSpaceConfig,\n depthwiseConv2dNativeConfig,\n einsumConfig,\n eluConfig,\n equalConfig,\n expConfig,\n expandDimsConfig,\n expm1Config,\n fillConfig,\n flipLeftRightConfig,\n fromPixelsConfig,\n floorConfig,\n floorDivConfig,\n fusedBatchNormConfig,\n fusedConv2DConfig,\n fusedDepthwiseConv2DConfig,\n gatherNdConfig,\n gatherV2Config,\n greaterConfig,\n greaterEqualConfig,\n identityConfig,\n imagConfig,\n leakyReluConfig,\n lessConfig,\n lessEqualConfig,\n logConfig,\n logicalAndConfig,\n logicalNotConfig,\n maxConfig,\n maximumConfig,\n maxPoolConfig,\n meanConfig,\n minConfig,\n minimumConfig,\n mirrorPadConfig,\n multiplyConfig,\n negConfig,\n nonMaxSuppressionV3Config,\n 
nonMaxSuppressionV5Config,\n notEqualConfig,\n onesLikeConfig,\n packConfig,\n padV2Config,\n powConfig,\n preluConfig,\n prodConfig,\n rangeConfig,\n realConfig,\n realDivConfig,\n reluConfig,\n relu6Config,\n reshapeConfig,\n resizeBilinearConfig,\n resizeNearestNeighborConfig,\n rotateWithOffsetConfig,\n rsqrtConfig,\n scatterNdConfig,\n selectConfig,\n sigmoidConfig,\n sinConfig,\n sinhConfig,\n sliceConfig,\n stridedSliceConfig,\n stringNGramsConfig,\n softmaxConfig,\n spaceToBatchNDConfig,\n sparseToDenseConfig,\n splitVConfig,\n sqrtConfig,\n squareConfig,\n squaredDifferenceConfig,\n subConfig,\n sumConfig,\n tanhConfig,\n tileConfig,\n topKConfig,\n transformConfig,\n transposeConfig,\n unpackConfig,\n zerosLikeConfig\n];\n\nfor (const kernelConfig of kernelConfigs) {\n registerKernel(kernelConfig);\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport class BufferManager {\n private numUsedBuffers = 0;\n private numFreeBuffers = 0;\n private freeBuffers: Map = new Map();\n private usedBuffers: Map = new Map();\n\n public numBytesUsed = 0;\n public numBytesAllocated = 0;\n\n constructor(private device: GPUDevice) {}\n\n acquireUploadBuffer(byteSize: number, usage: GPUBufferUsageFlags) {\n return this.acquireBuffer(byteSize, usage, true);\n }\n\n acquireBuffer(\n byteSize: number, usage: GPUBufferUsageFlags, mappedAtCreation = false) {\n const key = getBufferKey(byteSize, usage);\n if (!this.freeBuffers.has(key)) {\n this.freeBuffers.set(key, []);\n }\n\n if (!this.usedBuffers.has(key)) {\n this.usedBuffers.set(key, []);\n }\n\n this.numBytesUsed += byteSize;\n this.numUsedBuffers++;\n\n if (this.freeBuffers.get(key).length > 0) {\n this.numFreeBuffers--;\n\n const newBuffer = this.freeBuffers.get(key).shift();\n this.usedBuffers.get(key).push(newBuffer);\n return newBuffer;\n }\n\n this.numBytesAllocated += byteSize;\n const newBuffer =\n this.device.createBuffer({mappedAtCreation, size: byteSize, usage});\n this.usedBuffers.get(key).push(newBuffer);\n\n return newBuffer;\n }\n\n releaseBuffer(\n buffer: GPUBuffer, byteSize: number, usage: GPUBufferUsageFlags) {\n if (this.freeBuffers.size === 0) {\n return;\n }\n\n const key = getBufferKey(byteSize, usage);\n if (!this.freeBuffers.has(key)) {\n this.freeBuffers.set(key, []);\n }\n\n this.freeBuffers.get(key).push(buffer);\n this.numFreeBuffers++;\n this.numUsedBuffers--;\n\n const bufferList = this.usedBuffers.get(key);\n const bufferIndex = bufferList.indexOf(buffer);\n if (bufferIndex < 0) {\n throw new Error(\n 'Cannot release a buffer that was never provided by this ' +\n 'buffer manager');\n }\n bufferList.splice(bufferIndex, 1);\n this.numBytesUsed -= byteSize;\n }\n\n releaseUploadBuffer(\n buffer: GPUBuffer, byteSize: number, usage: GPUBufferUsageFlags) {\n buffer.mapAsync(GPUMapMode.WRITE)\n .then(\n () => {\n this.releaseBuffer(buffer, byteSize, 
usage);\n },\n (err) => {\n // Do nothing;\n });\n }\n\n getNumUsedBuffers(): number {\n return this.numUsedBuffers;\n }\n\n getNumFreeBuffers(): number {\n return this.numFreeBuffers;\n }\n\n dispose() {\n this.freeBuffers.forEach((buffers, key) => {\n buffers.forEach(buff => {\n buff.destroy();\n });\n });\n\n this.usedBuffers.forEach((buffers, key) => {\n buffers.forEach(buff => {\n buff.destroy();\n });\n });\n\n this.freeBuffers = new Map();\n this.usedBuffers = new Map();\n this.numUsedBuffers = 0;\n this.numFreeBuffers = 0;\n this.numBytesUsed = 0;\n this.numBytesAllocated = 0;\n }\n}\n\nfunction getBufferKey(byteSize: number, usage: GPUBufferUsageFlags) {\n return `${byteSize}_${usage}`;\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport class TextureManager {\n private numUsedTextures = 0;\n private numFreeTextures = 0;\n private freeTextures: Map = new Map();\n private usedTextures: Map = new Map();\n\n public numBytesUsed = 0;\n public numBytesAllocated = 0;\n\n constructor(private device: GPUDevice) {}\n\n acquireTexture(\n width: number, height: number, format: GPUTextureFormat,\n usage: GPUTextureUsageFlags) {\n const bytesPerElement = getBytesPerElement(format);\n const byteSize = width * height * bytesPerElement;\n const key = getTextureKey(width, height, format, usage);\n if (!this.freeTextures.has(key)) {\n this.freeTextures.set(key, []);\n }\n\n if (!this.usedTextures.has(key)) {\n this.usedTextures.set(key, []);\n }\n\n this.numBytesUsed += byteSize;\n this.numUsedTextures++;\n\n if (this.freeTextures.get(key).length > 0) {\n this.numFreeTextures--;\n\n const newTexture = this.freeTextures.get(key).shift();\n this.usedTextures.get(key).push(newTexture);\n return newTexture;\n }\n\n this.numBytesAllocated += byteSize;\n\n const newTexture = this.device.createTexture({\n size: [width, height],\n format,\n usage,\n });\n this.usedTextures.get(key).push(newTexture);\n\n return newTexture;\n }\n\n releaseTexture(\n texture: GPUTexture, width: number, height: number,\n format: GPUTextureFormat, usage: GPUTextureUsageFlags) {\n if (this.freeTextures.size === 0) {\n return;\n }\n\n const key = getTextureKey(width, height, format, usage);\n if (!this.freeTextures.has(key)) {\n this.freeTextures.set(key, []);\n }\n\n this.freeTextures.get(key).push(texture);\n this.numFreeTextures++;\n this.numUsedTextures--;\n\n const textureList = this.usedTextures.get(key);\n const textureIndex = textureList.indexOf(texture);\n if (textureIndex < 0) {\n throw new Error(\n 'Cannot release a texture that was never provided by this ' +\n 'texture manager');\n }\n textureList.splice(textureIndex, 1);\n const bytesPerElement = getBytesPerElement(format);\n const byteSize = width * height * bytesPerElement;\n this.numBytesUsed -= byteSize;\n }\n\n getNumUsedTextures(): number {\n return this.numUsedTextures;\n }\n\n 
getNumFreeTextures(): number {\n return this.numFreeTextures;\n }\n\n dispose() {\n this.freeTextures.forEach((textures, key) => {\n textures.forEach(texture => {\n texture.destroy();\n });\n });\n\n this.usedTextures.forEach((textures, key) => {\n textures.forEach(texture => {\n texture.destroy();\n });\n });\n\n this.freeTextures = new Map();\n this.usedTextures = new Map();\n this.numUsedTextures = 0;\n this.numFreeTextures = 0;\n this.numBytesUsed = 0;\n this.numBytesAllocated = 0;\n }\n}\n\nfunction getTextureKey(\n width: number, height: number, format: GPUTextureFormat,\n usage: GPUTextureUsageFlags) {\n return `${width}_${height}_${format}_${usage}`;\n}\n\nfunction getBytesPerElement(format: GPUTextureFormat) {\n if (format === 'rgba8unorm') {\n return 16;\n } else {\n throw new Error(`${format} is not supported!`);\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, Rank, ShapeMap, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport * as shader_preprocessor from './shader_preprocessor';\n\nexport interface WebGPUProgram {\n // The unique key to distinguish different shader source code.\n shaderKey: string;\n outputShape: number[];\n // dispatchLayout enumerates how tensor dimensions are distributed among\n // dispatch x,y,z dimensions.\n dispatchLayout: {x: number[], y?: number[], z?: number[]};\n // dispatch specifies geometry of thread groups - derived from dispatchLayout.\n dispatch: [number, number, number];\n variableNames: string[];\n uniforms?: string;\n // Size of register cache in one dimension (assumes square cache).\n // Each thread writes to workPerThread * workPerThread locations in the output\n // buffer.\n workPerThread?: number;\n // workGroupSize.x * workGroupSize.y * workGroupSize.z = the number of threads\n // in a thread group. 
Individual dimensions determines thread layout within\n // the group.\n workGroupSize: [number, number, number];\n isVec4?: boolean;\n // Whether to use output size for bounds checking.\n size?: boolean;\n // Whether to use atomic built-in functions.\n atomic?: boolean;\n getUserCode: () => string;\n}\n\nexport interface TensorData {\n dtype: DataType;\n}\n\nexport const makeBindGroup =\n (device: GPUDevice, bindGroupLayout: GPUBindGroupLayout,\n inputs: GPUBindingResource[], output: GPUBindingResource,\n uniforms?: GPUBindingResource) => {\n const bindings = [output, ...inputs];\n if (uniforms) {\n bindings.push(uniforms);\n }\n return device.createBindGroup({\n layout: bindGroupLayout,\n entries: bindings.map((b, i) => ({binding: i, resource: b})),\n });\n };\n\nexport const compileProgram =\n (device: GPUDevice, program: WebGPUProgram,\n pipelineLayout: GPUPipelineLayout,\n inputsData: shader_preprocessor.InputInfo[], output: TensorInfo,\n isFromPixel = false): GPUComputePipeline => {\n const outputData = {dtype: output.dtype, shape: output.shape};\n\n const source = shader_preprocessor.makeShader(\n inputsData, outputData, program, isFromPixel);\n const module = device.createShaderModule(\n {code: source, label: program.constructor.name});\n const pipeline = device.createComputePipeline({\n layout: pipelineLayout,\n compute: {module, entryPoint: 'main'},\n label: program.constructor.name\n });\n\n return pipeline;\n };\n\nexport function makeShaderKey(\n program: WebGPUProgram, shapes: Array, types: string[] = [],\n broadcastDimsKey = '', inputShapesEqualsOutShape = ''): string {\n const key = program.shaderKey + '_' +\n (program.workGroupSize ? program.workGroupSize.join(',') : '') +\n shapes.map(shape => shape.length).join(',') + types.join(',') +\n program.variableNames.join(',') + broadcastDimsKey +\n inputShapesEqualsOutShape;\n return key;\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport './flags_webgpu';\n\nimport {backend_util, buffer, DataStorage, DataType, DataValues, engine, env, KernelBackend, Rank, RecursiveArray, ShapeMap, TensorBuffer, TensorInfo, TimingInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {BufferManager} from './buffer_manager';\nimport {TextureManager} from './texture_manager';\nimport * as webgpu_program from './webgpu_program';\nimport * as webgpu_util from './webgpu_util';\nimport {WebGPULayout} from './webgpu_util';\n\nexport interface WebGPUMemoryInfo extends backend_util.MemoryInfo {\n numBytesInGPU: number;\n numBytesAllocatedInGPU: number;\n unreliable: boolean;\n}\n\ntype BufferInfo = {\n byteSize: number,\n usage: GPUBufferUsageFlags,\n buffer?: GPUBuffer\n};\n\ntype TensorBufferInfo = {\n values: backend_util.BackendValues,\n dtype: DataType,\n bufferInfo: BufferInfo,\n refCount: number,\n // For complex numbers, the real and imaginary parts are stored as their own\n // individual tensors, with a parent joining the two with the\n // complexTensorInfos field.\n complexTensorInfos?: {real: TensorInfo, imag: TensorInfo}\n};\n\ntype TextureInfo = {\n width: number,\n height: number,\n format: GPUTextureFormat,\n usage: GPUTextureUsageFlags,\n texture?: GPUTexture\n};\n\ntype ExternalImage = HTMLCanvasElement|ImageBitmap|OffscreenCanvas;\n\ninterface DataId {}\n\nexport type WebGPUKernelInfo = {\n name: string; query: Promise;\n};\n\nexport type TimerNode = RecursiveArray|WebGPUKernelInfo;\n\nexport interface WebGPUTimingInfo extends TimingInfo {\n uploadWaitMs: number;\n downloadWaitMs: number;\n}\n\ntype ProgramUniform = Array<{type: string; data: number[]}>;\n\n// Empirically determined constant used to determine size threshold for handing\n// off execution to the CPU.\nconst CPU_HANDOFF_SIZE_THRESHOLD =\n env().getNumber('WEBGPU_CPU_HANDOFF_SIZE_THRESHOLD');\n\n// Reshape dispatch, not to exceed device limits.\nconst reshapeDispatch =\n (device: GPUDevice,\n program: webgpu_program.WebGPUProgram): [number, number, number] => {\n const MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE =\n device.limits.maxComputeWorkgroupsPerDimension;\n const layout = program['dispatchLayout'];\n const dispatch = program['dispatch'];\n if (dispatch.every((d) => d <= MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE)) {\n return dispatch;\n }\n\n util.assert(\n dispatch[0] > MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE &&\n layout.y === undefined && layout.z === undefined,\n () => 'Dispatch size exceeds WebGPU limits in Y or Z dimension.');\n\n let dispatchAverage = Math.ceil(Math.sqrt(dispatch[0]));\n if (dispatchAverage > MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE) {\n dispatchAverage = Math.ceil(Math.cbrt(dispatch[0]));\n util.assert(\n dispatchAverage <= MAX_COMPUTE_PER_DIMENSION_DISPATCH_SIZE,\n () => 'Total dispatch size exceeds WebGPU maximum.');\n return [dispatchAverage, dispatchAverage, 
dispatchAverage];\n } else {\n return [dispatchAverage, dispatchAverage, 1];\n }\n };\n\nexport class WebGPUBackend extends KernelBackend {\n device: GPUDevice;\n queue: GPUQueue;\n currentCommandEncoder: GPUCommandEncoder;\n currentComputePass: GPUComputePassEncoder;\n tensorMap: DataStorage;\n supportTimeQuery: boolean;\n dummyCanvas: HTMLCanvasElement;\n dummyContext: GPUCanvasContext;\n\n private static nextDataId = 0;\n private nextDataId(): number {\n return WebGPUBackend.nextDataId++;\n }\n private commandQueueOwnedIds = new WeakSet();\n private layoutCache: {[key: number]: WebGPULayout};\n private pipelineCache: {[key: string]: GPUComputePipeline};\n private bufferManager: BufferManager;\n private textureManager: TextureManager;\n\n private tensorDisposalQueue: DataId[] = [];\n private uniformDisposalQueue: BufferInfo[] = [];\n private stagingDisposalQueue: BufferInfo[] = [];\n private textureDisposalQueue: TextureInfo[] = [];\n\n private disposed = false;\n\n private programTimersStack: TimerNode[];\n private activeTimers: TimerNode[];\n private uploadWaitMs = 0;\n private downloadWaitMs = 0;\n private dispatchNumberInEncoder = 0;\n private querySet: GPUQuerySet;\n private fromPixelTextureLayout: WebGPULayout = null;\n private fromPixelImportTextureLayout: WebGPULayout = null;\n\n constructor(device: GPUDevice, supportTimeQuery = false) {\n super();\n if (!webgpu_util.isWebGPUSupported()) {\n throw new Error('WebGPU is not supported on this device');\n }\n this.layoutCache = {};\n this.pipelineCache = {};\n this.device = device;\n this.queue = device.queue;\n this.currentCommandEncoder = null;\n this.currentComputePass = null;\n this.supportTimeQuery = supportTimeQuery;\n\n this.bufferManager = new BufferManager(this.device);\n this.textureManager = new TextureManager(this.device);\n this.tensorMap = new DataStorage(this, engine());\n if (this.supportTimeQuery) {\n this.querySet = this.device.createQuerySet({\n type: 'timestamp',\n count: 2,\n });\n }\n\n // Profiling tools like PIX needs this dummy canvas to\n // trigger capturing a frame.\n if (env().getBool('WEBGPU_USE_PROFILE_TOOL')) {\n this.dummyCanvas = document.createElement('canvas');\n this.dummyCanvas.width = 1;\n this.dummyCanvas.height = 1;\n\n this.dummyContext = this.dummyCanvas.getContext('webgpu');\n this.dummyContext.configure({\n device,\n format: 'bgra8unorm',\n });\n\n document.body.appendChild(this.dummyCanvas);\n }\n }\n\n floatPrecision(): 32 {\n return 32;\n }\n\n defaultGpuBufferUsage(): number {\n return GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC |\n GPUBufferUsage.COPY_DST;\n }\n\n flushDisposalQueue() {\n this.tensorDisposalQueue.forEach(d => {\n this.maybeReleaseBuffer(d);\n this.tensorMap.delete(d);\n });\n this.uniformDisposalQueue.forEach(\n d => this.bufferManager.releaseBuffer(d.buffer, d.byteSize, d.usage));\n this.stagingDisposalQueue.forEach(\n d => this.bufferManager.releaseUploadBuffer(\n d.buffer, d.byteSize, d.usage));\n this.textureDisposalQueue.forEach(\n d => this.textureManager.releaseTexture(\n d.texture, d.width, d.height, d.format, d.usage));\n\n this.tensorDisposalQueue = [];\n this.uniformDisposalQueue = [];\n this.stagingDisposalQueue = [];\n\n this.textureDisposalQueue = [];\n }\n\n /**\n * Dispose the memory if the dataId has 0 refCount. 
Return true if the memory\n * is released or memory is not managed in this backend, false if memory is\n * not cleared.\n * @param dataId\n * @oaram force Optional, remove the data regardless of refCount\n */\n disposeData(dataId: DataId, force = false): boolean {\n if (this.tensorMap.has(dataId)) {\n const data = this.tensorMap.get(dataId);\n data.refCount--;\n if (!force && data.refCount > 0) {\n return false;\n }\n\n if (this.commandQueueOwnedIds.has(dataId)) {\n this.tensorDisposalQueue.push(dataId);\n return false;\n } else {\n this.maybeReleaseBuffer(dataId);\n }\n\n const {complexTensorInfos} = this.tensorMap.get(dataId);\n if (complexTensorInfos != null) {\n this.disposeData(complexTensorInfos.real.dataId, true);\n this.disposeData(complexTensorInfos.imag.dataId, true);\n }\n\n this.tensorMap.delete(dataId);\n }\n return true;\n }\n\n memory(): WebGPUMemoryInfo {\n return {\n numBytesInGPU: this.bufferManager.numBytesUsed,\n numBytesAllocatedInGPU: this.bufferManager.numBytesAllocated,\n unreliable: false\n } as WebGPUMemoryInfo;\n }\n\n getBufferManager(): BufferManager {\n return this.bufferManager;\n }\n\n getTextureManager(): TextureManager {\n return this.textureManager;\n }\n\n acquireBuffer(\n byteSize: number,\n usage: GPUBufferUsageFlags = this.defaultGpuBufferUsage()) {\n return this.bufferManager.acquireBuffer(byteSize, usage);\n }\n\n maybeReleaseBuffer(dataId: DataId) {\n const info = this.tensorMap.get(dataId);\n if (info != null && info.bufferInfo.buffer != null) {\n this.bufferManager.releaseBuffer(\n info.bufferInfo.buffer, info.bufferInfo.byteSize,\n info.bufferInfo.usage);\n info.bufferInfo.buffer = null;\n }\n }\n\n /** Return refCount of a `TensorData`. */\n refCount(dataId: DataId): number {\n if (this.tensorMap.has(dataId)) {\n const tensorData = this.tensorMap.get(dataId);\n return tensorData.refCount;\n }\n return 0;\n }\n\n /** Increase refCount of a `TensorData`. */\n incRef(dataId: DataId): void {\n const tensorData = this.tensorMap.get(dataId);\n tensorData.refCount++;\n }\n\n /** Decrease refCount of a `TensorData`. */\n decRef(dataId: DataId): void {\n if (this.tensorMap.has(dataId)) {\n const tensorData = this.tensorMap.get(dataId);\n tensorData.refCount--;\n }\n }\n\n write(values: backend_util.BackendValues, shape: number[], dtype: DataType):\n DataId {\n if (dtype === 'complex64' && values != null) {\n throw new Error(\n `Cannot write to a complex64 dtype. ` +\n `Please use tf.complex(real, imag).`);\n }\n\n const dataId = {id: this.nextDataId()};\n const byteSize =\n util.sizeFromShape(shape) * webgpu_util.GPUBytesPerElement(dtype);\n\n this.tensorMap.set(dataId, {\n dtype,\n values,\n bufferInfo: {byteSize, usage: this.defaultGpuBufferUsage()},\n refCount: 1\n });\n return dataId;\n }\n\n move(\n dataId: DataId, values: backend_util.BackendValues, shape: number[],\n dtype: DataType, refCount: number): void {\n if (dtype === 'complex64') {\n throw new Error(\n `Cannot write to a complex64 dtype. 
` +\n `Please use tf.complex(real, imag).`);\n }\n const byteSize =\n util.sizeFromShape(shape) * webgpu_util.GPUBytesPerElement(dtype);\n\n this.tensorMap.set(dataId, {\n dtype,\n values,\n bufferInfo: {byteSize, usage: this.defaultGpuBufferUsage()},\n refCount\n });\n }\n\n submitQueue() {\n this.ensureComputePassEnded();\n this.queue.submit([this.currentCommandEncoder.finish()]);\n this.currentCommandEncoder = null;\n this.dispatchNumberInEncoder = 0;\n\n this.commandQueueOwnedIds = new WeakSet();\n\n this.flushDisposalQueue();\n }\n\n getBuffer(dataId: DataId) {\n this.uploadToGPU(dataId);\n return this.tensorMap.get(dataId).bufferInfo.buffer;\n }\n\n ensureCommandEncoderReady() {\n if (!this.currentCommandEncoder) {\n this.currentCommandEncoder = this.device.createCommandEncoder();\n }\n }\n\n ensureComputePassEnded() {\n if (this.currentComputePass) {\n this.currentComputePass.end();\n this.currentComputePass = null;\n }\n }\n\n getComputePass() {\n if (!this.currentComputePass) {\n this.currentComputePass = this.currentCommandEncoder.beginComputePass();\n }\n return this.currentComputePass;\n }\n\n private async getBufferData(info: TensorBufferInfo):\n Promise {\n if (info.values != null) {\n // Data is on the CPU.\n return info.values;\n }\n const staging = this.acquireBuffer(\n info.bufferInfo.byteSize,\n GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ);\n this.ensureCommandEncoderReady();\n this.ensureComputePassEnded();\n this.currentCommandEncoder.copyBufferToBuffer(\n info.bufferInfo.buffer, 0, staging, 0, info.bufferInfo.byteSize);\n this.submitQueue();\n\n await staging.mapAsync(GPUMapMode.READ);\n const values = staging.getMappedRange().slice(0);\n\n staging.unmap();\n if (staging != null) {\n this.bufferManager.releaseBuffer(\n staging, info.bufferInfo.byteSize,\n GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ);\n }\n\n // Need to get texture from swapChain to enable profiling tool\n // to capture a frame\n if (env().getBool('WEBGPU_USE_PROFILE_TOOL')) {\n util.assert(\n this.dummyContext !== undefined,\n () => `Fail to get context for profiling tool`);\n this.dummyContext.getCurrentTexture();\n }\n\n return values as backend_util.BackendValues;\n }\n\n private convertAndCacheOnCPU(dataId: DataId, data: backend_util.TypedArray):\n backend_util.TypedArray {\n const info = this.tensorMap.get(dataId);\n\n this.maybeReleaseBuffer(dataId);\n\n info.values = data;\n return info.values;\n }\n\n // TODO: Remove once this is fixed:\n // https://github.com/tensorflow/tfjs/issues/1595\n readSync(dataId: object): backend_util.BackendValues {\n const texData = this.tensorMap.get(dataId);\n const {values} = texData;\n\n if (values == null) {\n throw new Error(\n 'WebGPU readSync is only available for CPU-resident tensors.');\n }\n\n return values;\n }\n\n async read(dataId: object): Promise {\n if (!this.tensorMap.has(dataId)) {\n throw new Error(`Tensor ${dataId} was not registered!`);\n }\n const info = this.tensorMap.get(dataId);\n\n const {values} = info;\n\n if (values != null) {\n // TODO(xing.xu@intel.com): Merge backend_util.BackendValues and\n // backend_util.TypedArray.\n return this.convertAndCacheOnCPU(\n dataId, values as backend_util.TypedArray) as\n backend_util.BackendValues;\n }\n\n // Download the values from the GPU.\n let vals: backend_util.BackendValues;\n if (info.dtype === 'complex64') {\n const ps = await Promise.all([\n this.read(info.complexTensorInfos.real.dataId),\n this.read(info.complexTensorInfos.imag.dataId)\n ]);\n\n const realValues = ps[0];\n 
const imagValues = ps[1];\n vals = backend_util.mergeRealAndImagArrays(\n realValues as Float32Array, imagValues as Float32Array);\n } else {\n const data = await this.getBufferData(info);\n vals =\n webgpu_util.ArrayBufferToTypedArray(data as ArrayBuffer, info.dtype);\n }\n this.convertAndCacheOnCPU(dataId, vals);\n return vals;\n }\n\n bufferSync(t: TensorInfo): TensorBuffer {\n const data = this.readSync(t.dataId);\n let decodedData = data as DataValues;\n if (t.dtype === 'string') {\n try {\n // Decode the bytes into string.\n decodedData = (data as Uint8Array[]).map(d => util.decodeString(d));\n } catch {\n throw new Error('Failed to decode encoded string bytes into utf-8');\n }\n }\n return buffer(t.shape as ShapeMap[R], t.dtype, decodedData) as\n TensorBuffer;\n }\n\n async time(f: () => void): Promise {\n const oldActiveTimers = this.activeTimers;\n const newActiveTimers: TimerNode[] = [];\n\n let outerMostTime = false;\n if (this.programTimersStack == null) {\n this.programTimersStack = newActiveTimers;\n outerMostTime = true;\n } else {\n this.activeTimers.push(newActiveTimers);\n }\n this.activeTimers = newActiveTimers;\n\n f();\n\n const flattenedActiveTimerQueries =\n util.flatten(this.activeTimers.map((d: WebGPUKernelInfo) => d.query))\n .filter(d => d != null);\n const flattenedActiveTimerNames =\n util.flatten(this.activeTimers.map((d: WebGPUKernelInfo) => d.name))\n .filter(d => d != null);\n\n this.activeTimers = oldActiveTimers;\n\n if (outerMostTime) {\n this.programTimersStack = null;\n }\n const res: WebGPUTimingInfo = {\n uploadWaitMs: this.uploadWaitMs,\n downloadWaitMs: this.downloadWaitMs,\n kernelMs: null,\n wallMs: null\n };\n\n const kernelMs = await Promise.all(flattenedActiveTimerQueries);\n res['kernelMs'] = util.sum(kernelMs);\n res['getExtraProfileInfo'] = () =>\n kernelMs.map((d, i) => ({name: flattenedActiveTimerNames[i], ms: d}))\n .map(d => `${d.name}: ${d.ms}`)\n .join(', ');\n this.uploadWaitMs = 0;\n this.downloadWaitMs = 0;\n return res;\n }\n\n getAndSavePipeline(key: string, getPipeline: () => GPUComputePipeline) {\n if (!(key in this.pipelineCache)) {\n this.pipelineCache[key] = getPipeline();\n }\n return this.pipelineCache[key];\n }\n\n makeTensorInfo(\n shape: number[], dtype: DataType,\n values?: backend_util.BackendValues|string[]): TensorInfo {\n let dataId;\n if (dtype === 'string' && values != null && values.length > 0 &&\n util.isString(values[0])) {\n const encodedValues =\n (values as {} as string[]).map(d => util.encodeString(d));\n\n dataId = this.write(encodedValues, shape, dtype);\n } else {\n dataId = this.write(values as backend_util.BackendValues, shape, dtype);\n }\n return {dataId, shape, dtype};\n }\n\n private tensorToBinding(tensor?: TensorInfo): GPUBindingResource {\n if (!tensor) {\n return null;\n }\n\n const tensorData = this.tensorMap.get(tensor.dataId);\n\n return {\n offset: 0,\n size: tensorData.bufferInfo.byteSize,\n buffer: tensorData.bufferInfo.buffer\n };\n }\n\n async getQueryTime(query: GPUQuerySet): Promise {\n if (this.supportTimeQuery) {\n return this.getTimeFromQuerySet(query);\n } else {\n return 0;\n }\n }\n\n uploadToGPU(dataId: DataId): void {\n const info = this.tensorMap.get(dataId);\n\n if (info.bufferInfo.buffer != null) {\n // Already on the GPU.\n return;\n }\n\n info.bufferInfo.buffer = this.acquireBuffer(info.bufferInfo.byteSize);\n if (info.values) {\n const stagingBuffer = this.bufferManager.acquireUploadBuffer(\n info.bufferInfo.byteSize,\n GPUBufferUsage.MAP_WRITE | 
GPUBufferUsage.COPY_SRC);\n const arrayBuffer = stagingBuffer.getMappedRange();\n if (info.dtype === 'int32' || info.dtype === 'bool') {\n new Int32Array(arrayBuffer).set(info.values as TypedArray);\n } else {\n new Float32Array(arrayBuffer).set(info.values as Float32Array);\n }\n stagingBuffer.unmap();\n this.ensureCommandEncoderReady();\n this.ensureComputePassEnded();\n this.currentCommandEncoder.copyBufferToBuffer(\n stagingBuffer, 0, info.bufferInfo.buffer, 0,\n info.bufferInfo.byteSize);\n\n const stagingInfo = {\n byteSize: info.bufferInfo.byteSize,\n usage: GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC,\n buffer: stagingBuffer\n };\n this.stagingDisposalQueue.push(stagingInfo);\n // TODO: WebGPU doesn't support read data synchronously from GPU to CPU.\n // So it will report error when switching backend from WebGPU to others.\n // There are two situations: 1) swithcing the backend after running a\n // model; 2) swithcing the backend within the model. Temporarilly keep the\n // values on CPU to solve the first issue.\n // info.values = null;\n }\n }\n\n private makeUniforms(uniformsWithType: ProgramUniform): GPUBindingResource {\n let currentOffset = 0;\n let preLength = 0;\n const offsets: number[] = [];\n uniformsWithType.forEach((d) => {\n if (d.data.length === 0) {\n d.data = [1];\n }\n // https://www.w3.org/TR/WGSL/#alignof\n let baseAlignment: number;\n switch (d.data.length) {\n case 1:\n baseAlignment = 4;\n break;\n case 2:\n baseAlignment = 8;\n break;\n case 3:\n baseAlignment = 16;\n break;\n case 4:\n baseAlignment = 16;\n break;\n case 5:\n baseAlignment = 16;\n break;\n case 6:\n baseAlignment = 16;\n break;\n default:\n util.assert(false, () => `Unsupported ${d.data.length}D shape`);\n }\n\n if (preLength === 5 || preLength === 6) {\n baseAlignment = 16;\n }\n currentOffset = Math.ceil(currentOffset / baseAlignment) * baseAlignment;\n preLength = d.data.length;\n offsets.push(currentOffset);\n currentOffset += d.data.length * 4;\n });\n\n const arrayBuffer = new ArrayBuffer(currentOffset);\n uniformsWithType.forEach((d, i) => {\n const offset = offsets[i];\n if (d.type === 'int32') {\n new Int32Array(arrayBuffer, offset, d.data.length).set(d.data);\n } else if (d.type === 'uint32') {\n new Uint32Array(arrayBuffer, offset, d.data.length).set(d.data);\n } else {\n new Float32Array(arrayBuffer, offset, d.data.length).set(d.data);\n }\n });\n\n const uniformBuffer = this.acquireBuffer(\n currentOffset, GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM);\n this.queue.writeBuffer(uniformBuffer, 0, arrayBuffer, 0, currentOffset);\n\n const uniformInfo = {\n byteSize: currentOffset,\n usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,\n buffer: uniformBuffer\n };\n this.uniformDisposalQueue.push(uniformInfo);\n\n return {offset: 0, size: currentOffset, buffer: uniformBuffer};\n }\n\n // This layout is used by all programs except fromPixel.\n private createLayout(inputEntrySize: number): WebGPULayout {\n const bindGroupLayoutEntries: GPUBindGroupLayoutEntry[] = [];\n // Output buffer binding layout.\n bindGroupLayoutEntries.push({\n binding: 0,\n visibility: GPUShaderStage.COMPUTE,\n buffer: {type: 'storage' as const}\n });\n // Input buffer binding layout. 
Depends on variableNames length.\n for (let i = 0; i < inputEntrySize; i++) {\n bindGroupLayoutEntries.push({\n binding: i + 1,\n visibility: GPUShaderStage.COMPUTE,\n buffer: {type: 'read-only-storage' as const}\n });\n }\n bindGroupLayoutEntries.push({\n binding: inputEntrySize + 1,\n visibility: GPUShaderStage.COMPUTE,\n buffer: {type: 'uniform' as const}\n });\n const bindGroupLayout =\n this.device.createBindGroupLayout({entries: bindGroupLayoutEntries});\n const pipelineLayout =\n this.device.createPipelineLayout({bindGroupLayouts: [bindGroupLayout]});\n return {bindGroupLayout, pipelineLayout};\n }\n\n private getCachedOrCreateLayout(inputEntrySize: number): WebGPULayout {\n if (!(inputEntrySize in this.layoutCache)) {\n this.layoutCache[inputEntrySize] = this.createLayout(inputEntrySize);\n }\n return this.layoutCache[inputEntrySize];\n }\n\n public runWebGPUProgram(\n program: webgpu_program.WebGPUProgram, inputs: TensorInfo[],\n outputDtype: DataType, programUniforms?: ProgramUniform,\n output?: TensorInfo): TensorInfo {\n if (!output) {\n output = this.makeTensorInfo(program.outputShape, outputDtype);\n if (util.sizeFromShape(output.shape) === 0) {\n // Short-circuit the computation since the result is empty (has 0 in its\n // shape).\n const outData = this.tensorMap.get(output.dataId);\n outData.values =\n util.getTypedArrayFromDType(output.dtype as 'float32', 0);\n return output;\n }\n this.uploadToGPU(output.dataId);\n }\n program.dispatch = reshapeDispatch(this.device, program);\n\n // There are five kinds of uniforms: NAN, shapes, shape strides, program\n // size, program defined uniforms.\n let uniformsWithType: Array<{type: string; data: number[];}> =\n [{type: 'float32', data: [NaN]}];\n const bufferShapes = inputs.concat(output).map(d => d.shape);\n const uniformsType = 'int32';\n bufferShapes.map(d => {\n uniformsWithType.push({type: uniformsType, data: d});\n });\n const strides = util.computeStrides(output.shape);\n uniformsWithType.push({type: uniformsType, data: strides});\n if (program.size) {\n const size = util.sizeFromShape(program.outputShape);\n uniformsWithType.push(\n {type: uniformsType, data: [program.isVec4 ? size / 4 : size]});\n }\n if (programUniforms) {\n uniformsWithType = [...uniformsWithType, ...programUniforms];\n }\n\n const uniforms = this.makeUniforms(uniformsWithType);\n\n const inputsData = inputs.map((input: TensorInfo, i: number) => {\n if (input.dtype === 'complex64') {\n throw new Error(\n `GPGPUProgram does not support complex64 input. 
For complex64 ` +\n `dtypes, please separate the program into real and imaginary ` +\n `parts.`);\n }\n this.uploadToGPU(input.dataId);\n\n return {\n // Returning dtype from tensorMap because it reflects dtype\n // of underlying buffer, rather than abstract dtype.\n dtype: this.tensorMap.get(input.dataId).dtype,\n shape: input.shape,\n name: program.variableNames[i]\n };\n });\n const bufferTypes = inputsData.map(d => d.dtype).concat(output.dtype);\n const broadcastDims = inputsData.map(\n d => backend_util.getBroadcastDims(d.shape, output.shape));\n const inputShapesEqualsOutShape =\n inputsData.map(d => util.arraysEqual(d.shape, output.shape)).join('_');\n const broadcastDimsKey = broadcastDims.map(d => d.join('_')).join(';');\n const key = webgpu_program.makeShaderKey(\n program, bufferShapes, bufferTypes, broadcastDimsKey,\n inputShapesEqualsOutShape);\n\n const {bindGroupLayout, pipelineLayout} =\n this.getCachedOrCreateLayout(program.variableNames.length);\n\n const pipeline = this.getAndSavePipeline(key, () => {\n return webgpu_program.compileProgram(\n this.device, program, pipelineLayout, inputsData, output);\n });\n\n const shouldTimeProgram = this.activeTimers != null;\n\n // Creating bind groups on the fly should never be a bottleneck.\n const bg = webgpu_program.makeBindGroup(\n this.device, bindGroupLayout, inputs.map(t => this.tensorToBinding(t)),\n this.tensorToBinding(output), uniforms);\n\n this.ensureCommandEncoderReady();\n const pass = this.getComputePass();\n if (shouldTimeProgram) {\n if (this.supportTimeQuery) {\n // tslint:disable-next-line:no-any\n (pass as any).writeTimestamp(this.querySet, 0);\n }\n }\n pass.setPipeline(pipeline);\n pass.setBindGroup(0, bg);\n pass.dispatch(\n program.dispatch[0], program.dispatch[1], program.dispatch[2]);\n if (shouldTimeProgram) {\n if (this.supportTimeQuery) {\n // tslint:disable-next-line:no-any\n (pass as any).writeTimestamp(this.querySet, 1);\n }\n }\n this.dispatchNumberInEncoder++;\n\n inputs.forEach(input => {\n this.commandQueueOwnedIds.add(input.dataId);\n });\n this.commandQueueOwnedIds.add(output.dataId);\n\n if (env().get('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE') as\n number <= this.dispatchNumberInEncoder) {\n this.submitQueue();\n }\n\n if (shouldTimeProgram) {\n this.activeTimers.push({\n name: program.constructor.name,\n query: this.getQueryTime(this.querySet)\n });\n }\n return output;\n }\n\n private getFromPixelTextureLayout(useImport: boolean): WebGPULayout {\n if (useImport) {\n if (this.fromPixelImportTextureLayout === null) {\n this.fromPixelImportTextureLayout =\n this.createFromPixelTextureLayout(true /* useImport */);\n }\n return this.fromPixelImportTextureLayout;\n }\n\n if (this.fromPixelTextureLayout === null) {\n this.fromPixelTextureLayout =\n this.createFromPixelTextureLayout(false /* useImport */);\n }\n return this.fromPixelTextureLayout;\n }\n\n private createFromPixelTextureLayout(useImport: boolean): WebGPULayout {\n const bindGroupLayoutEntries: GPUBindGroupLayoutEntry[] = [];\n // Output buffer binding layout.\n bindGroupLayoutEntries.push({\n binding: 0,\n visibility: GPUShaderStage.COMPUTE,\n buffer: {type: 'storage' as const}\n });\n // Input texture binding layout.\n if (useImport) {\n bindGroupLayoutEntries.push({\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n externalTexture: {},\n });\n } else {\n bindGroupLayoutEntries.push(\n {binding: 1, visibility: GPUShaderStage.COMPUTE, texture: {}});\n }\n // Uniform buffer binding layout.\n bindGroupLayoutEntries.push(\n {binding: 2, 
visibility: GPUShaderStage.COMPUTE, buffer: {}});\n const fromPixelBindGroupLayout =\n this.device.createBindGroupLayout({entries: bindGroupLayoutEntries});\n const fromPixelPipelineLayout = this.device.createPipelineLayout(\n {bindGroupLayouts: [fromPixelBindGroupLayout]});\n return {\n bindGroupLayout: fromPixelBindGroupLayout,\n pipelineLayout: fromPixelPipelineLayout\n };\n }\n\n private copyExternalImageToTexture(\n externalImage: ExternalImage, outShape: number[]): GPUTextureView {\n const textureUsage = GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING;\n const textureFormat = 'rgba8unorm' as GPUTextureFormat;\n const texture = this.textureManager.acquireTexture(\n outShape[1], outShape[0], textureFormat, textureUsage);\n const externalResource = texture.createView();\n\n this.queue.copyExternalImageToTexture(\n {source: externalImage}, {texture}, [outShape[1], outShape[0]]);\n\n const textureInfo = {\n width: outShape[1],\n height: outShape[0],\n format: textureFormat,\n usage: textureUsage,\n texture\n };\n this.textureDisposalQueue.push(textureInfo);\n return externalResource;\n }\n\n runFromPixelsProgram(\n program: webgpu_program.WebGPUProgram, outShape: number[],\n programUniforms: ProgramUniform, useImport: boolean,\n externalImage: ExternalImage|HTMLVideoElement) {\n program.dispatch = reshapeDispatch(this.device, program);\n\n const output = this.makeTensorInfo(outShape, 'int32');\n if (util.sizeFromShape(output.shape) === 0) {\n // Short-circuit the computation since the result is empty (has 0 in its\n // shape).\n const outData = this.tensorMap.get(output.dataId);\n outData.values =\n util.getTypedArrayFromDType(output.dtype as 'float32', 0);\n return output;\n }\n this.uploadToGPU(output.dataId);\n const key = webgpu_program.makeShaderKey(program, [output.shape]);\n\n const layout = this.getFromPixelTextureLayout(useImport);\n\n const pipeline = this.getAndSavePipeline(key, () => {\n return webgpu_program.compileProgram(\n this.device, program, layout.pipelineLayout, [], output, true);\n });\n\n let externalResource: GPUExternalTexture|GPUTextureView;\n if (useImport) {\n const externalTextureDescriptor = {\n source: externalImage as HTMLVideoElement\n };\n externalResource =\n this.device.importExternalTexture(externalTextureDescriptor);\n } else {\n externalResource = this.copyExternalImageToTexture(\n externalImage as ExternalImage, output.shape);\n }\n\n const binding = this.tensorToBinding(output);\n const uniforms = this.makeUniforms(programUniforms);\n const bindGroup = this.device.createBindGroup({\n layout: layout.bindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n buffer: (binding as GPUBufferBinding).buffer,\n }\n },\n {\n binding: 1,\n resource: externalResource,\n },\n {\n binding: 2,\n resource: {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n buffer: (uniforms as GPUBufferBinding).buffer,\n }\n }\n ],\n });\n this.ensureCommandEncoderReady();\n const pass = this.getComputePass();\n const shouldTimeProgram = this.activeTimers != null;\n if (shouldTimeProgram) {\n if (this.supportTimeQuery) {\n // tslint:disable-next-line:no-any\n (pass as any).writeTimestamp(this.querySet, 0);\n }\n }\n pass.setPipeline(pipeline);\n pass.setBindGroup(0, bindGroup);\n pass.dispatch(\n program.dispatch[0], program.dispatch[1], program.dispatch[2]);\n if (shouldTimeProgram) {\n if (this.supportTimeQuery) {\n // 
tslint:disable-next-line:no-any\n (pass as any).writeTimestamp(this.querySet, 1);\n }\n }\n this.commandQueueOwnedIds.add(output.dataId);\n this.dispatchNumberInEncoder++;\n if (env().get('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE') as\n number <= this.dispatchNumberInEncoder) {\n this.submitQueue();\n }\n\n if (shouldTimeProgram) {\n this.activeTimers.push({\n name: program.constructor.name,\n query: this.getQueryTime(this.querySet)\n });\n }\n return output;\n }\n\n async getTimeFromQuerySet(querySet: GPUQuerySet) {\n const queryBuffer = this.acquireBuffer(\n 16, GPUBufferUsage.COPY_SRC | GPUBufferUsage.QUERY_RESOLVE);\n const dst = this.acquireBuffer(\n 16, GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST);\n\n this.ensureCommandEncoderReady();\n this.ensureComputePassEnded();\n this.currentCommandEncoder.resolveQuerySet(querySet, 0, 2, queryBuffer, 0);\n this.currentCommandEncoder.copyBufferToBuffer(queryBuffer, 0, dst, 0, 16);\n this.submitQueue();\n await dst.mapAsync(GPUMapMode.READ);\n const arrayBuf = new BigUint64Array(dst.getMappedRange());\n const timeElapsedNanos = Number((arrayBuf[1] - arrayBuf[0]));\n dst.unmap();\n this.bufferManager.releaseBuffer(\n dst, 16, GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST);\n this.bufferManager.releaseBuffer(\n queryBuffer, 16,\n GPUBufferUsage.COPY_SRC | GPUBufferUsage.QUERY_RESOLVE);\n // Return milliseconds.\n return timeElapsedNanos / 1000000;\n }\n\n shouldExecuteOnCPU(\n inputs: TensorInfo[],\n sizeThreshold = CPU_HANDOFF_SIZE_THRESHOLD): boolean {\n return env().getBool('WEBGPU_CPU_FORWARD') &&\n inputs.every(\n input =>\n this.tensorMap.get(input.dataId).bufferInfo.buffer == null &&\n util.sizeFromShape(input.shape) < sizeThreshold);\n }\n\n numDataIds() {\n return this.tensorMap.numDataIds() - this.tensorDisposalQueue.length;\n }\n\n dispose() {\n if (this.disposed) {\n return;\n }\n this.bufferManager.dispose();\n this.textureManager.dispose();\n this.disposed = true;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as webgpu_util from './webgpu_util';\nexport {WebGPUBackend} from './backend_webgpu';\nexport {WebGPUProgram} from './webgpu_program';\nexport {webgpu_util};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport './flags_webgpu';\nimport './register_all_kernels';\n\nimport {env, registerBackend} from '@tensorflow/tfjs-core';\n\nimport {WebGPUBackend} from './backend_webgpu';\nimport * as webgpu from './webgpu';\nimport {isWebGPUSupported} from './webgpu_util';\n\nif (isWebGPUSupported()) {\n registerBackend('webgpu', async () => {\n // Remove it once we figure out how to correctly read the tensor data\n // before the tensor is disposed in profiling mode.\n env().set('CHECK_COMPUTATION_FOR_ERRORS', false);\n\n const gpuDescriptor: GPURequestAdapterOptions = {\n powerPreference: env().get('WEBGPU_USE_LOW_POWER_GPU') ?\n 'low-power' :\n 'high-performance'\n };\n\n const adapter = await navigator.gpu.requestAdapter(gpuDescriptor);\n const adapterLimits = adapter.limits;\n const deviceDescriptor: GPUDeviceDescriptor = {};\n const supportTimeQuery = adapter.features.has('timestamp-query');\n deviceDescriptor.requiredLimits = {\n 'maxComputeWorkgroupStorageSize':\n adapterLimits.maxComputeWorkgroupStorageSize,\n 'maxComputeWorkgroupsPerDimension':\n adapterLimits.maxComputeWorkgroupsPerDimension,\n };\n\n if (supportTimeQuery) {\n deviceDescriptor.requiredFeatures = ['timestamp-query' as const];\n } else {\n console.warn(\n `This device doesn't support timestamp-query extension. ` +\n `Start Chrome browser with flag ` +\n `--disable-dawn-features=disallow_unsafe_apis then try again. ` +\n `Or zero will shown for the kernel time when profiling mode is` +\n `enabled. 
Using performance.now is not workable for webgpu since` +\n `it doesn't support synchronously to read data from GPU.`);\n }\n const device: GPUDevice = await adapter.requestDevice(deviceDescriptor);\n return new WebGPUBackend(device, supportTimeQuery);\n }, 3 /*priority*/);\n}\n\nexport {webgpu};\n"],"names":["env","util","backend_util","Reshape","broadcast_util","tslib_1.__values","_FusedMatMul","Identity","Complex","upcastType","concatImpl","buffer","slice_util","select","Abs","cpuAdd","Add","AddN","cpuTranspose","Transpose","ArgMax","ArgMin","AvgPool","BatchMatMul","getCoords","Slice","BatchToSpaceND","cpuNotEqual","NotEqual","Real","tf","Cast","Ceil","ClipByValue","Imag","Concat","Conv2D","Conv2DBackpropInput","Cos","Cosh","CropAndResize","Cumprod","Cumsum","DepthToSpace","DepthwiseConv2dNative","cpuMultiply","Multiply","sumOutType","Sum","Einsum","Elu","cpuEqual","Equal","Exp","ExpandDims","Expm1","Fill","FlipLeftRight","Floor","FloorDiv","FromPixels","FusedBatchNorm","FusedConv2D","FusedDepthwiseConv2D","GatherNd","getSourceCoords","GatherV2","cpuGreater","Greater","cpuGreaterEqual","GreaterEqual","LeakyRelu","cpuLess","Less","cpuLessEqual","LessEqual","Log","LogicalAnd","LogicalNot","Max","cpuMaximum","Maximum","MaxPool","Mean","Min","cpuMinimum","Minimum","MirrorPad","Neg","NonMaxSuppressionV3","NonMaxSuppressionV5","ZerosLike","OnesLike","Pack","PadV2","Pow","Prelu","Prod","Range","RealDiv","Relu","Relu6","ResizeBilinear","ResizeNearestNeighbor","RotateWithOffset","Rsqrt","ScatterNd","Select","Sigmoid","Sin","Sinh","cpuSub","Sub","Softmax","SpaceToBatchND","SparseToDense","SplitV","Sqrt","Square","SquaredDifference","StridedSlice","StringNGrams","Tanh","Tile","TopK","Transform","Unpack","registerKernel","shader_preprocessor.makeShader","tslib_1.__extends","webgpu_util.isWebGPUSupported","DataStorage","engine","webgpu_util.GPUBytesPerElement","webgpu_util.ArrayBufferToTypedArray","webgpu_program.makeShaderKey","webgpu_program.compileProgram","webgpu_program.makeBindGroup","KernelBackend","registerBackend"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;IAAA;;;;;;;;;;;;;;IAcA;IAEA,IAAI,aAAa,GAAG,UAAS,CAAC,EAAE,CAAC;QAC7B,aAAa,GAAG,MAAM,CAAC,cAAc;aAChC,EAAE,SAAS,EAAE,EAAE,EAAE,YAAY,KAAK,IAAI,UAAU,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,SAAS,GAAG,CAAC,CAAC,EAAE,CAAC;YAC5E,UAAU,CAAC,EAAE,CAAC,IAAI,KAAK,IAAI,CAAC,IAAI,CAAC;gBAAE,IAAI,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC;oBAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;QAC/E,OAAO,aAAa,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;IAC/B,CAAC,CAAC;aAEc,SAAS,CAAC,CAAC,EAAE,CAAC;QAC1B,aAAa,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QACpB,SAAS,EAAE,KAAK,IAAI,CAAC,WAAW,GAAG,CAAC,CAAC,EAAE;QACvC,CAAC,CAAC,SAAS,GAAG,CAAC,KAAK,IAAI,GAAG,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,SAAS,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,EAAE,EAAE,CAAC,CAAC;IACzF,CAAC;aAwCe,SAAS,CAAC,OAAO,EAAE,UAAU,EAAE,CAAC,EAAE,SAAS;QACvD,SAAS,KAAK,CAAC,KAAK,IAAI,OAAO,KAAK,YAAY,CAAC,GAAG,KAAK,GAAG,IAAI,CAAC,CAAC,UAAU,OAAO,IAAI,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE;QAC5G,OAAO,KAAK,CAAC,KAAK,CAAC,GAAG,OAAO,CAAC,EAAE,UAAU,OAAO,EAAE,MAAM;YACrD,SAAS,SAAS,CAAC,KAAK,IAAI,IAAI;gBAAE,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC;aAAE;YAAC,OAAO,CAAC,EAAE;gBAAE,MAAM,CAAC,CAAC,CAAC,CAAC;aAAE,EAAE;YAC3F,SAAS,QAAQ,CAAC,KAAK,IAAI,IAAI;gBAAE,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;aAAE;YAAC,OAAO,CAAC,EAAE;gBAAE,MAAM,CAAC,CAAC,CAAC,CAAC;aAAE,EAAE;YAC9F,SAAS,IAAI,CAAC,MAAM,IAAI,MAAM,CAAC,IAAI,GAAG,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC,GAAG,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,SAAS,EAAE,QAAQ,CAAC,
CAAC,EAAE;YAC9G,IAAI,CAAC,CAAC,SAAS,GAAG,SAAS,CAAC,KAAK,CAAC,OAAO,EAAE,UAAU,IAAI,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC,CAAC;SACzE,CAAC,CAAC;IACP,CAAC;aAEe,WAAW,CAAC,OAAO,EAAE,IAAI;QACrC,IAAI,CAAC,GAAG,EAAE,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,cAAa,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC;gBAAE,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,IAAI,EAAE,EAAE,EAAE,GAAG,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC;QACjH,OAAO,CAAC,GAAG,EAAE,IAAI,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,OAAO,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,QAAQ,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,OAAO,MAAM,KAAK,UAAU,KAAK,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,GAAG,cAAa,OAAO,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC;QACzJ,SAAS,IAAI,CAAC,CAAC,IAAI,OAAO,UAAU,CAAC,IAAI,OAAO,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE;QAClE,SAAS,IAAI,CAAC,EAAE;YACZ,IAAI,CAAC;gBAAE,MAAM,IAAI,SAAS,CAAC,iCAAiC,CAAC,CAAC;YAC9D,OAAO,CAAC;gBAAE,IAAI;oBACV,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,IAAI;wBAAE,OAAO,CAAC,CAAC;oBAC7J,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC;wBAAE,EAAE,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;oBACxC,QAAQ,EAAE,CAAC,CAAC,CAAC;wBACT,KAAK,CAAC,CAAC;wBAAC,KAAK,CAAC;4BAAE,CAAC,GAAG,EAAE,CAAC;4BAAC,MAAM;wBAC9B,KAAK,CAAC;4BAAE,CAAC,CAAC,KAAK,EAAE,CAAC;4BAAC,OAAO,EAAE,KAAK,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC;wBACxD,KAAK,CAAC;4BAAE,CAAC,CAAC,KAAK,EAAE,CAAC;4BAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC;4BAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC;4BAAC,SAAS;wBACjD,KAAK,CAAC;4BAAE,EAAE,GAAG,CAAC,CAAC,GAAG,CAAC,GAAG,EAAE,CAAC;4BAAC,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC;4BAAC,SAAS;wBACjD;4BACI,IAAI,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;gCAAE,CAAC,GAAG,CAAC,CAAC;gCAAC,SAAS;6BAAE;4BAC5G,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;gCAAE,CAAC,CAAC,KAAK,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC;gCAAC,MAAM;6BAAE;4BACtF,IAAI,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;gCAAE,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;gCAAC,CAAC,GAAG,EAAE,CAAC;gCAAC,MAAM;6BAAE;4BACrE,IAAI,CAAC,IAAI,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,EAAE;gCAAE,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;gCAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;gCAAC,MAAM;6BAAE;4BACnE,IAAI,CAAC,CAAC,CAAC,CAAC;gCAAE,CAAC,CAAC,GAAG,CAAC,GAAG,EAAE,CAAC;4BACtB,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC;4BAAC,SAAS;qBAC9B;oBACD,EAAE,GAAG,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC;iBAC9B;gBAAC,OAAO,CAAC,EAAE;oBAAE,EAAE,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;oBAAC,CAAC,GAAG,CAAC,CAAC;iBAAE;wBAAS;oBAAE,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;iBAAE;YAC1D,IAAI,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC;gBAAE,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;YAAC,OAAO,EAAE,KAAK,EAAE,EAAE,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;SACpF;IACL,CAAC;aAWe,QAAQ,CAAC,CAAC;QACtB,IAAI,CAAC,GAAG,OAAO,MAAM,KAAK,UAAU,IAAI,MAAM,CAAC,QAAQ,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC;QAC9E,IAAI,CAAC;YAAE,OAAO,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QACxB,IAAI,CAAC,IAAI,OAAO,
CAAC,CAAC,MAAM,KAAK,QAAQ;YAAE,OAAO;gBAC1C,IAAI,EAAE;oBACF,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,MAAM;wBAAE,CAAC,GAAG,KAAK,CAAC,CAAC;oBACnC,OAAO,EAAE,KAAK,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC,CAAC,EAAE,CAAC;iBAC3C;aACJ,CAAC;QACF,MAAM,IAAI,SAAS,CAAC,CAAC,GAAG,yBAAyB,GAAG,iCAAiC,CAAC,CAAC;IAC3F,CAAC;aAEe,MAAM,CAAC,CAAC,EAAE,CAAC;QACvB,IAAI,CAAC,GAAG,OAAO,MAAM,KAAK,UAAU,IAAI,CAAC,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC;QAC3D,IAAI,CAAC,CAAC;YAAE,OAAO,CAAC,CAAC;QACjB,IAAI,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,GAAG,EAAE,EAAE,CAAC,CAAC;QACjC,IAAI;YACA,OAAO,CAAC,CAAC,KAAK,KAAK,CAAC,IAAI,CAAC,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,IAAI;gBAAE,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;SAC9E;QACD,OAAO,KAAK,EAAE;YAAE,CAAC,GAAG,EAAE,KAAK,EAAE,KAAK,EAAE,CAAC;SAAE;gBAC/B;YACJ,IAAI;gBACA,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,KAAK,CAAC,GAAG,CAAC,CAAC,QAAQ,CAAC,CAAC;oBAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;aACpD;oBACO;gBAAE,IAAI,CAAC;oBAAE,MAAM,CAAC,CAAC,KAAK,CAAC;aAAE;SACpC;QACD,OAAO,EAAE,CAAC;IACd,CAAC;aAEe,QAAQ;QACpB,KAAK,IAAI,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,CAAC,MAAM,EAAE,CAAC,EAAE;YAC9C,EAAE,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;QACzC,OAAO,EAAE,CAAC;IACd;;ICnJA;;;;;;;;;;;;;;;;IAmBA,IAAM,GAAG,GAAGA,MAAG,EAAE,CAAC;IAElB;IACA,GAAG,CAAC,YAAY,CAAC,mCAAmC,EAAE,cAAM,OAAA,EAAE,GAAA,CAAC,CAAC;IAEhE;;;;IAIA,GAAG,CAAC,YAAY,CAAC,oBAAoB,EAAE,cAAM,OAAA,IAAI,GAAA,CAAC,CAAC;IAEnD;;;IAGA,GAAG,CAAC,YAAY,CAAC,+BAA+B,EAAE,cAAM,OAAA,CAAC,GAAA,CAAC,CAAC;IAE3D;;;;;IAKA,GAAG,CAAC,YAAY,CAAC,yBAAyB,EAAE,cAAM,OAAA,KAAK,GAAA,CAAC,CAAC;IAEzD;;;;IAIA,GAAG,CAAC,YAAY,CAAC,mCAAmC,EAAE,cAAM,OAAA,KAAK,GAAA,CAAC,CAAC;IAEnE;;;;IAIA,GAAG,CAAC,YAAY,CAAC,oCAAoC,EAAE,cAAM,OAAA,KAAK,GAAA,CAAC,CAAC;IAEpE;;;;IAIA,GAAG,CAAC,YAAY,CAAC,0BAA0B,EAAE,cAAM,OAAA,KAAK,GAAA,CAAC,CAAC;IAE1D;;;;;;IAMA,GAAG,CAAC,YAAY,CAAC,mCAAmC,EAAE,cAAM,OAAA,IAAI,GAAA,CAAC,CAAC;IAElE;;;;IAIA,GAAG,CAAC,YAAY,CAAC,yBAAyB,EAAE,cAAM,OAAA,KAAK,GAAA,CAAC,CAAC;IAEzD;;;IAGA,GAAG,CAAC,YAAY,CAAC,mBAAmB,EAAE,cAAM,OAAA,KAAK,GAAA,CAAC;;IC7ElD;;;;;;;;;;;;;;;;IAiBA,IAAY,YAoBX;IApBD,WAAY,YAAY;QACtB,6CAAG,CAAA;QACH,6CAAG,CAAA;QACH,6CAAG,CAAA;QACH,6CAAG,CAAA;QACH,iDAAK,CAAA;QACL,qDAAO,CAAA;QACP,iEAAa,CAAA;QACb,+CAAI,CAAA;QACJ,2DAAU,CAAA;QACV,6DAAW,CAAA;QACX,0DAAS,CAAA;QACT,4EAAkB,CAAA;QAClB,sDAAO,CAAA;QACP,8CAAG,CAAA;QACH,kDAAK,CAAA;QACL,8CAAG,CAAA;QACH,8CAAG,CAAA;QACH,kFAAqB,CAAA;QACrB,kFAAqB,CAAA;IACvB,CAAC,EApBW,YAAY,KAAZ,YAAY,QAoBvB;IAED,IAAM,GAAG,GAAG,eAAe,CAAC;IAC5B;IACA;IACA;IACA;IACA,IAAM,qBAAqB,GAAG,uCAAuC,CAAC;IACtE,IAAM,qBAAqB,GAAG,uCAAuC,CAAC;IACtE,IAAM,GAAG,GAAG,eAAe,CAAC;IAC5B,IAAM,GAAG,GAAG,eAAe,CAAC;IAC5B,IAAM,kBAAkB,GAAG,2BAA2B,CAAC;IACvD,IAAM,GAAG,GAAG,eAAe,CAAC;IAC5B,IAAM,KAAK,GAAG,qBAAqB,CAAC;IACpC,IAAM,UAAU,GAAG,2BAA2B,CAAC;IAC/C,IAAM,OAAO,GAAG,oBAAoB,CAAC;IACrC,IAAM,YAAY,GAAG,0BAA0B,CAAC;IAChD,IAAM,aAAa,GAAG,qBAAqB,CAAC;IAC5C,IAAM,kBAAkB,GAAG,2BAA2B,CAAC;IACvD,IAAM,IAAI,GAAG,oBAAoB,CAAC;IAClC,IAAM,SAAS,GAAG,0BAA0B,CAAC;IAC7C,IAAM,UAAU,GAAG,qBAAqB,CAAC;IACzC,IAAM,eAAe,GAAG,2BAA2B,CAAC;IACpD,IAAM,WAAW,GAAG,6CAA6C,CAAC;IAClE,IAAM,gBAAgB,GAAG,8EACU,CAAC;IACpC,IAAM,iBAAiB,GAAG,oEAGvB,CAAC;IACJ,IAAM,sBAAsB,GAAG,0OAa5B,CAAC;IACJ,IAAM,OAAO,GAAG,0HAKb,CAAC;IAEJ,IAAM,YAAY,GAAG,ijBAqBlB,CAAC;IAEJ,IAAM,SAAS,GAAG,qBAAqB,CAAC;IACxC,IAAM,cAAc,GAAG,2BAA2B,CAAC;IACnD,IAAM,GAAG,GAAG,yNAWT,CAAC;IACJ,IAAM,QAAQ,GAAG,ooBAqBb,sBAAsB,+BAEvB,CAAC;IAEJ,IAAM,KAAK,GAAG,2CAA2C,CAAC;IAC1D,IAAM,UAAU,GAAG,0IAGhB,CAAC;IAEJ,SAAS,eAAe,CAAC,EAAU,EAAE,OAAgB;QACnD,IAAM,eAAe,GAAG,OAAO,GAAG,sBAAsB,GAA
G,iBAAiB,CAAC;QAC7E,OAAO,OAAO,GAAG,sCACc,EAAE,iEAE9B,GAAG,eAAe;YACb,8BAEP;YACgB,eAAe,IAAG,kBACxB,EAAE,gBACZ,CAAA,CAAC;IACJ,CAAC;aAEe,iBAAiB,CAC7B,IAAkB,EAAE,OAAiB;QACvC,QAAQ,IAAI;YACV,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,GAAG,CAAC;YACb,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,GAAG,CAAC;YACb,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,GAAG,CAAC;YACb,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,GAAG,CAAC;YACb,KAAK,YAAY,CAAC,KAAK;gBACrB,OAAO,OAAO,GAAG,UAAU,GAAG,KAAK,CAAC;YACtC,KAAK,YAAY,CAAC,OAAO;gBACvB,OAAO,OAAO,GAAG,YAAY,GAAG,OAAO,CAAC;YAC1C,KAAK,YAAY,CAAC,aAAa;gBAC7B,OAAO,OAAO,GAAG,kBAAkB,GAAG,aAAa,CAAC;YACtD,KAAK,YAAY,CAAC,IAAI;gBACpB,OAAO,OAAO,GAAG,SAAS,GAAG,IAAI,CAAC;YACpC,KAAK,YAAY,CAAC,UAAU;gBAC1B,OAAO,OAAO,GAAG,eAAe,GAAG,UAAU,CAAC;YAChD,KAAK,YAAY,CAAC,WAAW;gBAC3B,OAAO,OAAO,GAAG,gBAAgB,GAAG,WAAW,CAAC;YAClD,KAAK,YAAY,CAAC,SAAS;gBACzB,OAAO,OAAO,GAAG,cAAc,GAAG,SAAS,CAAC;YAC9C,KAAK,YAAY,CAAC,kBAAkB;gBAClC,OAAO,kBAAkB,CAAC;YAC5B,KAAK,YAAY,CAAC,OAAO;gBACvB,OAAO,OAAO,GAAG,YAAY,GAAG,OAAO,CAAC;YAC1C,KAAK,YAAY,CAAC,KAAK;gBACrB,OAAO,OAAO,GAAG,UAAU,GAAG,KAAK,CAAC;YACtC,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,eAAe,CAAC,KAAK,EAAE,OAAO,CAAC,CAAC;YACzC,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,eAAe,CAAC,KAAK,EAAE,OAAO,CAAC,CAAC;YACzC,KAAK,YAAY,CAAC,GAAG;gBACnB,OAAO,OAAO,GAAG,QAAQ,GAAG,GAAG,CAAC;YAClC,KAAK,YAAY,CAAC,qBAAqB;gBACrC,OAAO,qBAAqB,CAAC;YAC/B,KAAK,YAAY,CAAC,qBAAqB;gBACrC,OAAO,qBAAqB,CAAC;YAC/B;gBACE,MAAM,IAAI,KAAK,CAAC,gBAAc,IAAI,yBAAsB,CAAC,CAAC;SAC7D;IACH;;ICtNA;;;;;;;;;;;;;;;;IAiBA,IAAY,WAwBX;IAxBD,WAAY,WAAW;QACrB,2CAAG,CAAA;QACH,6CAAI,CAAA;QACJ,2CAAG,CAAA;QACH,6CAAI,CAAA;QACJ,2CAAG,CAAA;QACH,2CAAG,CAAA;QACH,+CAAK,CAAA;QACL,+CAAK,CAAA;QACL,iDAAM,CAAA;QACN,2CAAG,CAAA;QACH,4DAAW,CAAA;QACX,4CAAG,CAAA;QACH,8CAAI,CAAA;QACJ,gDAAK,CAAA;QACL,wDAAS,CAAA;QACT,gDAAK,CAAA;QACL,4CAAG,CAAA;QACH,8CAAI,CAAA;QACJ,oDAAO,CAAA;QACP,8CAAI,CAAA;QACJ,kDAAM,CAAA;QACN,8CAAI,CAAA;QACJ,kDAAM,CAAA;IACR,CAAC,EAxBW,WAAW,KAAX,WAAW,QAwBtB;IAED,IAAM,GAAG,GAAG,gBAAgB,CAAC;IAC7B,IAAM,IAAI,GAAG,iBAAiB,CAAC;IAC/B,IAAM,GAAG,GAAG,gBAAgB,CAAC;IAC7B,IAAM,IAAI,GAAG,6DAGZ,CAAC;IACF,IAAM,KAAK,GAAG,sBAAsB,CAAC;IACrC,IAAM,GAAG,GAAG,qDAAqD,CAAC;IAClE,IAAM,QAAQ,GAAG,uQAehB,CAAC;IACF,IAAM,GAAG,GAAG,gBAAgB,CAAC;IAC7B,IAAM,KAAK,GAAG,kBAAkB,CAAC;IACjC,IAAM,MAAM,GAAG,WAAW,CAAC;IAC3B,IAAM,GAAG,GAAG,oDACK,CAAC;IAClB,IAAM,WAAW,GAAG,0BAA0B,CAAC;IAC/C,IAAM,GAAG,GAAG,YAAY,CAAC;IACzB,IAAM,SAAS,GAAG,uDAAuD,CAAC;IAC1E,IAAM,cAAc,GAAG,qJAGtB,CAAC;IACF,IAAM,IAAI,GAAG,uCAAuC,CAAC;IACrD,IAAM,KAAK,GAAG,4BAA4B,CAAC;IAC3C,IAAM,UAAU,GACZ,gFAAgF,CAAC;IACrF,IAAM,SAAS,GAAG,qSAiBjB,CAAC;IACF,IAAM,KAAK,GAAG,qBAAqB,CAAC;IACpC,IAAM,OAAO,GAAG,qCAAqC,CAAC;IACtD,IAAM,GAAG,GAAG,gBAAgB,CAAC;IAC7B,IAAM,IAAI,GAAG,4DAGZ,CAAC;IACF,IAAM,IAAI,GAAG,iBAAiB,CAAC;IAC/B,IAAM,MAAM,GAAG,eAAe,CAAC;IAC/B,IAAM,IAAI,GAAG,oFAGZ,CAAC;IACF,IAAM,MAAM,GAAG,uBAAuB,CAAC;aAEvB,gBAAgB,CAAC,IAAiB,EAAE,OAAiB;QACnE,QAAQ,IAAI;YACV,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,GAAG,CAAC;YACb,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,GAAG,CAAC;YACb,KAAK,WAAW,CAAC,IAAI;gBACnB,OAAO,IAAI,CAAC;YACd,KAAK,WAAW,CAAC,IAAI;gBACnB,OAAO,IAAI,CAAC;YACd,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,OAAO,GAAG,QAAQ,GAAG,GAAG,CAAC;YAClC,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,GAAG,CAAC;YACb,KAAK,WAAW,CAAC,KAAK;gBACpB,OAAO,KAAK,CAAC;YACf,KAAK,WAAW,CAAC,KAAK;gBACpB,OAAO,KAAK,CAAC;YACf,KAAK,WAAW,CAAC,MAAM;gBACrB,OAAO,MAAM,CAAC;YAChB,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,GAAG,CAAC;YACb,KAAK,WAAW,CAAC,WAAW;gBAC1B,OAAO,WAAW,CAAC;YACrB,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,GAAG,CAAC;YACb,KAAK,WAAW,CAAC,SAAS;gBACxB,OAAO,OAAO,GAAG,cAAc,GAAG,SAAS,CAAC;YAC9C,KAAK,WAAW,CAAC,IAAI;gBACnB,OAAO,OAAO,GAAG,SAAS,GAAG,IAAI,CAAC;YACpC,KAAK,WAAW,CAAC,K
AAK;gBACpB,OAAO,OAAO,GAAG,UAAU,GAAG,KAAK,CAAC;YACtC,KAAK,WAAW,CAAC,KAAK;gBACpB,OAAO,KAAK,CAAC;YACf,KAAK,WAAW,CAAC,OAAO;gBACtB,OAAO,OAAO,CAAC;YACjB,KAAK,WAAW,CAAC,GAAG;gBAClB,OAAO,GAAG,CAAC;YACb,KAAK,WAAW,CAAC,IAAI;gBACnB,OAAO,IAAI,CAAC;YACd,KAAK,WAAW,CAAC,IAAI;gBACnB,OAAO,IAAI,CAAC;YACd,KAAK,WAAW,CAAC,MAAM;gBACrB,OAAO,MAAM,CAAC;YAChB,KAAK,WAAW,CAAC,IAAI;gBACnB,OAAO,IAAI,CAAC;YACd,KAAK,WAAW,CAAC,MAAM;gBACrB,OAAO,MAAM,CAAC;YAEhB;gBACE,MAAM,IAAI,KAAK,CAAC,gBAAc,IAAI,yBAAsB,CAAC,CAAC;SAC7D;IACH;;ICzKA;;;;;;;;;;;;;;;;aAsBgB,4BAA4B,CACxC,UAAmC,EAAE,MAAc;QAAd,uBAAA,EAAA,cAAc;QACrD,IAAI,UAAU,KAAK,IAAI,EAAE;YACvB,OAAO,IAAI,CAAC;SACb;aAAM,IAAI,UAAU,KAAK,QAAQ,EAAE;YAClC,OAAO,gBAAgB,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;SAC7C;aAAM,IAAI,UAAU,KAAK,MAAM,EAAE;YAChC,OAAO,gBAAgB,CAAC,WAAW,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;SACnD;aAAM,IAAI,UAAU,KAAK,KAAK,EAAE;YAC/B,OAAO,gBAAgB,CAAC,WAAW,CAAC,GAAG,EAAE,MAAM,CAAC,CAAC;SAClD;aAAM,IAAI,UAAU,KAAK,OAAO,EAAE;YACjC,OAAO,gBAAgB,CAAC,WAAW,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;SACpD;aAAM,IAAI,UAAU,KAAK,OAAO,EAAE;YACjC,OAAO,iBAAiB,CAAC,YAAY,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;SACtD;aAAM,IAAI,UAAU,KAAK,SAAS,EAAE;YACnC,OAAO,gBAAgB,CAAC,WAAW,CAAC,OAAO,EAAE,MAAM,CAAC,CAAC;SACtD;aAAM,IAAI,UAAU,KAAK,WAAW,EAAE;YACrC,OAAO,gBAAgB,CAAC,WAAW,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;SACxD;QACD,MAAM,IAAI,KAAK,CAAC,gBACZ,UAAU,sDAAmD,CAAC,CAAC;IACrE;;IC3CA;;;;;;;;;;;;;;;;IAiBA;aACgB,0BAA0B,CACtC,UAAoB,EAAE,YAAoB;QAC5C,IAAI,IAAI,CAAC,GAAG,OAAR,IAAI,WAAQ,UAAU,KAAI,CAAC,EAAE;YAC/B,MAAM,IAAI,KAAK,CAAC,0DAA0D,CAAC,CAAC;SAC7E;QAED,IAAM,SAAS,GAAG,UAAU,CAAC,MAAM,CAAC;QACpC,IAAM,KAAK,GAAG,UAAU,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,YAAY,SAAI,CAAC,MAAG,GAAA,CAAC,CAAC;QAC3D,IAAM,OAAO,GAAG,IAAI,KAAK,CAAC,SAAS,GAAG,CAAC,CAAC,CAAC;QACzC,OAAO,CAAC,SAAS,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,SAAS,GAAG,CAAC,CAAC,CAAC;QAC9C,KAAK,IAAI,CAAC,GAAG,SAAS,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,EAAE;YACvC,OAAO,CAAC,CAAC,CAAC,GAAG,MAAI,OAAO,CAAC,CAAC,GAAG,CAAC,CAAC,WAAM,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,MAAG,CAAC;SACtD;QAED,OAAO,OAAO,CAAC;IACjB;;aCbgB,iBAAiB,CAAC,IAAY;QAC5C,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,OAAO,KAAK,CAAC;SACd;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAO,WAAW,CAAC;SACpB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAO,WAAW,CAAC;SACpB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAO,WAAW,CAAC;SACpB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAO,MAAM,CAAC;SACf;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAO,MAAM,CAAC;SACf;aAAM;YACL,MAAM,KAAK,CAAC,kBAAgB,IAAI,0BAAuB,CAAC,CAAC;SAC1D;IACH,CAAC;aAEe,YAAY,CAAC,KAAa;QACxC,IAAI,KAAK,KAAK,CAAC,EAAE;YACf,OAAO,GAAG,CAAC;SACZ;aAAM,IAAI,KAAK,KAAK,CAAC,EAAE;YACtB,OAAO,GAAG,CAAC;SACZ;aAAM,IAAI,KAAK,KAAK,CAAC,EAAE;YACtB,OAAO,GAAG,CAAC;SACZ;aAAM,IAAI,KAAK,KAAK,CAAC,EAAE;YACtB,OAAO,GAAG,CAAC;SACZ;aAAM,IAAI,KAAK,KAAK,CAAC,EAAE;YACtB,OAAO,GAAG,CAAC;SACZ;aAAM,IAAI,KAAK,KAAK,CAAC,EAAE;YACtB,OAAO,GAAG,CAAC;SACZ;aAAM;YACL,MAAM,KAAK,CAAC,WAAS,KAAK,0BAAuB,CAAC,CAAC;SACpD;IACH,CAAC;IAGD,SAAS,cAAc,CAAC,IAAc,EAAE,MAAe;QAErD,IAAI,IAAI,KAAK,SAAS,EAAE;YACtB,OAAO,MAAM,GAAG,WAAW,GAAG,KAAK,CAAC;SACrC;aAAM,IAAI,IAAI,KAAK,OAAO,EAAE;YAC3B,OAAO,MAAM,GAAG,WAAW,GAAG,KAAK,CAAC;SACrC;aAAM,IAAI,IAAI,KAAK,MAAM,EAAE;;;YAG1B,OAAO,MAAM,GAAG,WAAW,GAAG,KAAK,CAAC;SACrC;QAED,OAAO,IAAI,CAAC;IACd,CAAC;aAmBe,sBAAsB;QACpC,OAAO,uFAER,CAAC;IACF,CAAC;aAEe,mBAAmB;QACjC,OAAO,SACL,sBAAsB,EAAE,2RAO3B,CAAC;IACF,CAAC;aAEe,iCAAiC;QAC/C,OAAO,WACH,mBAAmB,EAAE,4CAE1B,CAAC;IACF,CAAC;aAEe,UAAU,CACtB,SAAsB,EAAE,UAA8C,EACtE,OAAsB,EAAE,WAAmB;QAAnB,4BAAA,EAAA,mBAAmB;QAC7C,IAAM,cAAc,GAAa,EAAE,CAAC;QACpC,cAAc,CAAC,IAAI,CAAC,gCACK,OAAO,CAAC,aAAa,CAAC,CAAC,CAAC,qCACxB,OAAO,CAAC,aAAa,CAAC,CAAC,CAAC,qCACxB,OAA
O,CAAC,aAAa,CAAC,CAAC,CAAC,szBAsBhD,CAAC,CAAC;QAEH,IAAI,WAAW,KAAK,IAAI,EAAE;YACxB,cAAc,CAAC,IAAI,CAAC,qPAShB,cAAc,CAAC,UAAU,CAAC,KAAK,EAAE,OAAO,CAAC,MAAM,CAAC,0EAEnD,CAAC,CAAC;YACH,OAAO;gBACL,aAAa;gBACb,cAAc,CAAC,IAAI,CAAC,IAAI,CAAC;gBACzB,yBAAyB,CAAC,UAAU,CAAC,KAAK,CAAC;gBAC3C,OAAO,CAAC,WAAW,EAAE;aACtB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACd;QAED,IAAI,iBAAiB,GAAG,KAAK,CAAC;QAC9B,IAAI,qBAAqB,GAAG,KAAK,CAAC;QAClC,IAAI,kBAAkB,GAAG,+BAA+B,CAAC;QACzD,OAAO,CAAC,aAAa,CAAC,OAAO,CAAC,UAAC,CAAC,EAAE,CAAC;YACjC,IAAM,WAAW,GAAG,iBAAiB,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACjE,IAAI,WAAW,KAAK,MAAM,IAAI,WAAW,KAAK,MAAM,EAAE;gBACpD,qBAAqB,GAAG,IAAI,CAAC;aAC9B;YACD,IAAI,iBAAiB,IAAI,qBAAqB,EAAE;gBAC9C,kBAAkB,IAAI,aAAa,CAAC;aACrC;YACD,iBAAiB,GAAG,qBAAqB,CAAC;YAC1C,kBAAkB;gBACX,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,gBAAW,WAAW,OAAI,CAAC;SACzE,CAAC,CAAC;QACH,IAAM,cAAc,GAAG,iBAAiB,CAAC,UAAU,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAClE,qBAAqB;YACjB,cAAc,KAAK,MAAM,IAAI,cAAc,KAAK,MAAM,CAAC;QAC3D,IAAI,iBAAiB,IAAI,qBAAqB,EAAE;YAC9C,kBAAkB,IAAI,aAAa,CAAC;SACrC;QACD,iBAAiB,GAAG,qBAAqB,CAAC;QAC1C,kBAAkB,IAAI,gBAAc,cAAc,OAAI,CAAC;QACvD,IAAM,aAAa,GAAG,UAAU,CAAC,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC;QAClD,IAAM,eAAe,GAAG,iBAAiB,CAAC,aAAa,CAAC,CAAC;QACzD,qBAAqB;YACjB,eAAe,KAAK,MAAM,IAAI,eAAe,KAAK,MAAM,CAAC;QAC7D,IAAI,iBAAiB,IAAI,qBAAqB,EAAE;YAC9C,kBAAkB,IAAI,aAAa,CAAC;SACrC;QACD,iBAAiB,GAAG,qBAAqB,CAAC;QAC1C,kBAAkB,IAAI,+BACE,eAAe,OAAI,CAAC;QAE5C,IAAI,OAAO,CAAC,IAAI,EAAE;YAChB,IAAI,iBAAiB,EAAE;gBACrB,kBAAkB,IAAI,aAAa,CAAC;aACrC;YACD,iBAAiB,GAAG,KAAK,CAAC;YAC1B,kBAAkB,IAAI,cAAc,CAAC;SACtC;QAED,IAAI,OAAO,CAAC,QAAQ,EAAE;YACpB,IAAI,iBAAiB,EAAE;gBACrB,kBAAkB,IAAI,aAAa,CAAC;aACrC;YACD,kBAAkB,IAAI,OAAO,CAAC,QAAQ,CAAC;SACxC;QACD,kBAAkB,IAAI,IAAI,CAAC;QAE3B,cAAc,CAAC,IAAI,CAAC,kBAAkB,CAAC,CAAC;;QAGxC,IAAI,OAAO,CAAC,MAAM,EAAE;YAClB,cAAc,CAAC,IAAI,CAAC,sFAErB,CAAC,CAAC;SACF;aAAM;YACL,cAAc,CAAC,IAAI,CAAC,mEAEhB,cAAc,CAAC,UAAU,CAAC,KAAK,EAAE,OAAO,CAAC,MAAM,CAAC,WACrD,CAAC,CAAC;SACF;QACD,OAAO,CAAC,aAAa,CAAC,OAAO,CAAC,UAAC,CAAC,EAAE,CAAC;YACjC,cAAc,CAAC,IAAI,CAAC,+BACC,CAAC,GAAG,CAAC,8BAAwB,CAAC,gBAC/C,cAAc,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,OAAO,CAAC,MAAM,CAAC,aACrD,CAAC,CAAC;SACJ,CAAC,CAAC;QAEH,IAAI,kBAAkB,KAAK,EAAE,EAAE;YAC7B,cAAc,CAAC,IAAI,CAAC,+BAEhB,CAAC,GAAG,OAAO,CAAC,aAAa,CAAC,MAAM,8CACnC,CAAC,CAAC;SACJ;QAEK,IAAA,gFAC8D,EAD7D,qBAAa,EAAE,0BAC8C,CAAC;QAErE,IAAM,OAAO,GAAG;YACd,aAAa,EAAE,cAAc,CAAC,IAAI,CAAC,IAAI,CAAC;YACxC,yBAAyB,CAAC,UAAU,CAAC,KAAK,CAAC,EAAE,aAAa;YAC1D,+BAA+B,CAAC,UAAU,CAAC,KAAK,CAAC,MAAM,CAAC;SACzD,CAAC;QACF,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE;YACnB,OAAO,CAAC,IAAI,CACR,gBAAgB,CAAC,UAAU,CAAC,KAAK,EAAE,UAAU,CAAC,KAAK,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC;SAC3E;QACD,IAAI,kBAAkB,KAAK,UAAU,CAAC,KAAK,CAAC,MAAM,EAAE;;;YAGlD,IAAM,YAAY,GAAG,SAAS;iBACJ,GAAG,CACA,UAAA,CAAC,IAAI,OAAA,eAAe,CAChB,CAAC,EAAE,UAAU,CAAC,KAAK,EAAE,OAAO,CAAC,MAAM,EACnC,OAAO,CAAC,cAAc,CAAC,CAAC,CAAC,MAAM;gBAC3B,UAAU,CAAC,KAAK,CAAC,MAAM,CAAC,GAAA,CAAC;iBACpC,IAAI,CAAC,IAAI,CAAC,CAAC;YACrC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;SAC5B;QAED,OAAO,CAAC,IAAI,CAAC,OAAO,CAAC,WAAW,EAAE,CAAC,CAAC;QACpC,IAAM,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QAClC,OAAO,MAAM,CAAC;IAChB,CAAC;IAED,IAAM,aAAa,GAAG,qnFA0DrB,CAAC;IAEF,SAAS,+BAA+B,CAAC,OAAe;QACtD,IAAI,OAAO,GAAG,EAAE,CAAC;QACjB,QAAQ,OAAO;YACb,KAAK,CAAC,CAAC;YACP,KAAK,CAAC;gBACJ,OAAO,IAAI,6GAIR,CAAC;gBACJ,MAAM;YACR,KAAK,CAAC;gBACJ,OAAO,IAAI,gKAIR,CAAC;gBACJ,MAAM;YACR,KAAK,CAAC;gBACJ,OAAO,IAAI,8LAIR,CAAC;gBACJ,MAAM;YACR,KAAK,CAAC;gBACJ,OAAO,IAAI,wOAKR,CAAC;gBACJ,MAAM;YACR,KAAK,CAAC;gBACJ
,OAAO,IAAI,4UAQR,CAAC;gBACJ,MAAM;YACR,KAAK,CAAC;gBACJ,OAAO,IAAI,mYASR,CAAC;gBACJ,MAAM;YACR;gBACEC,OAAI,CAAC,MAAM,CAAC,KAAK,EAAE,cAAM,OAAA,iBAAe,OAAO,YAAS,GAAA,CAAC,CAAC;gBAC1D,MAAM;SACT;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,SAAS,gBAAgB,CACrB,QAAkB,EAAE,aAAuB,EAAE,MAAe;QAC9D,IAAM,OAAO,GAAG,QAAQ,CAAC,MAAM,CAAC;QAChC,IAAM,QAAQ,GAAG,cAAc,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;QACvD,IAAI,OAAO,CAAC;QACZ,IAAI,MAAM,EAAE;YACV,OAAO,GAAG,0FACc,QAAQ,qHAGR,QAAQ,oBAC9B,CAAC;SACJ;aAAM;YACL,OAAO,GAAG,oFACc,QAAQ,+GAGR,QAAQ,oBAC9B,CAAC;SACJ;QACD,IAAI,OAAO,IAAI,CAAC,EAAE;YAChB,IAAM,IAAI,GAAG,CAAC,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC;YACpE,IAAM,IAAI,GAAG,iBAAiB,CAAC,OAAO,CAAC,CAAC;YAExC,IAAI,MAAM,EAAE;gBACV,OAAO,IAAI,kCAEP,IAAI,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,CAAC,WAAQ,GAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,iFACG,IAAI,SAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,qGAIhE,IAAI,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,CAAC,WAAQ,GAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,iFACG,IAAI,SAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,2EAGrE,CAAC;aACD;iBAAM;gBACL,OAAO,IAAI,kCAEP,IAAI,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,CAAC,WAAQ,GAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,2EACG,IAAI,SAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,iGAIhE,IAAI,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,CAAC,WAAQ,GAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,2EACG,IAAI,SAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,uEAGrE,CAAC;aACD;SACF;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,SAAS,eAAe,CACpB,SAAoB,EAAE,QAAkB,EAAE,MAAe,EACzD,oBAA6B;QAC/B,IAAI,GAAG,GAAG,uBAAuB,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;QAErD,IAAM,OAAO,GAAG,SAAS,CAAC,KAAK,CAAC;QAChC,IAAI,OAAO,CAAC,MAAM,IAAI,QAAQ,CAAC,MAAM,EAAE;YACrC,GAAG,IAAI,uBAAuB,CAC1B,SAAS,EAAE,QAAQ,EAAE,MAAM,EAAE,oBAAoB,CAAC,CAAC;SACxD;QAED,OAAO,GAAG,CAAC;IACb,CAAC;IAED,SAAS,uBAAuB,CAC5B,SAAoB,EAAE,MAAe;QACvC,IAAM,OAAO,GAAG,SAAS,CAAC,IAAI,CAAC;QAC/B,IAAM,IAAI,GAAG,SAAS,CAAC,KAAK,CAAC,MAAM,CAAC;QACpC,IAAM,IAAI,GAAG,iBAAiB,CAAC,IAAI,CAAC,CAAC;QACrC,IAAM,QAAQ,GAAG,KAAK,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QAC5E,IAAM,IAAI,GAAG,CAAC,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC;QACjE,IAAM,MAAM,GAAG,IAAI,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,CAAC,WAAQ,GAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QAEtD,IAAI,IAAI,GAAG,CAAC,EAAE;YACZ,IAAI,MAAM,EAAE;gBACV,OAAO,kBACA,QAAQ,sDACQ,OAAO,6BAE7B,CAAC;aACH;YAED,OAAO,gBACA,QAAQ,uCACE,OAAO,yBAEvB,CAAC;SACH;QAED,IAAM,QAAQ,GACV,eAAY,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,WAAO,CAAC;QAC1E,IAAI,OAAO,GAAM,IAAI,MAAG,CAAC;QACzB,IAAI,IAAI,KAAK,CAAC,EAAE;YACd,OAAO,GAAG,IAAI,CAAC;SAChB;QAED,IAAI,MAAM,EAAE;YACV,OAAO,gBACA,QAAQ,SAAI,MAAM,mDACF,OAAO,2BAAsB,OAAO,SAAI,IAAI,SAC/D,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,sBACV,QAAQ,8BAEb,CAAC;SACL;QAED,OAAO,cACA,QAAQ,SAAI,MAAM,qCACR,OAAO,2BAAsB,OAAO,SAAI,IAAI,SACzD,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,oBACV,QAAQ,qBAEd,CAAC;IACL,CAAC;aAEe,uBAAuB,CACnC,SAAoB,EAAE,QAAkB,EAAE,MAAe,EACzD,oBAA6B;QAC/B,IAAM,OAAO,GAAG,SAAS,CAAC,IAAI,CAAC;QAC/B,IAAM,cAAc,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QAE1E,IAAM,QAAQ,GAAG,KAAK,GAAG,cAAc,GAAG,UAAU,CAAC;QAErD,IAAM,MAAM,GAAG,SAAS,CAAC,KAAK,CAAC,MAAM,CAAC;QACtC,IAAM,OAAO,GAAG,QAAQ,CAAC,MAAM,CAAC;QAChC,IAAM,IAAI,GAAG,iBAAiB,CAAC,OAAO,CAAC,CAAC;;;;QAKxC,IAAIA,OAAI,CAAC,WAAW,CAAC,SAAS,CAAC,KAAK,EAAE,QAAQ,CAAC,IAAI,oBAAoB,EAAE;YACvE,IAAI,MAAM,EAAE;gBACV,OAAO,kBACA,QAAQ,4EACQ,OAAO,iDAGvB,QAAQ,wBAAmB,IAAI,qDACf,OAAO,UAC1B,OAAO,GAAG,CAAC,GAAG,kCAAkC,GAAG,QAAQ,kCAE5D,CAAC;aACL;iBAAM;gBACL,OAAO,gBACF,QAAQ,8DA
CE,OAAO,6CAGjB,QAAQ,wBAAmB,IAAI,uCACrB,OAAO,UAClB,OAAO,GAAG,CAAC,GAAG,kCAAkC,GAAG,QAAQ,0BAE9D,CAAC;aACH;SACF;QAED,IAAM,aAAa,GACfC,eAAY,CAAC,gBAAgB,CAAC,SAAS,CAAC,KAAK,EAAE,QAAQ,CAAC,CAAC;QAC7D,IAAM,QAAQ,GAAG,OAAO,GAAG,MAAM,CAAC;QAElC,IAAI,aAAa,GAAG,EAAE,CAAC;QAEvB,IAAI,MAAM,KAAK,CAAC,EAAE;YAChB,IAAI,MAAM,EAAE;gBACV,OAAO,gBACF,QAAQ,mEACC,cAAc,iCAGvB,QAAQ,wBAAmB,IAAI,4CACtB,cAAc,uBAE7B,CAAC;aACD;YACD,OAAO,gBACA,QAAQ,4DACC,cAAc,iCAGvB,QAAQ,wBAAmB,IAAI,qCACtB,cAAc,uBAE7B,CAAC;SACH;aAAM;YACL,IAAI,OAAO,GAAG,CAAC,IAAI,aAAa,CAAC,MAAM,IAAI,CAAC,EAAE;gBAC5C,aAAa,GAAG,aAAa,CAAC;aAC/B;iBAAM;gBACL,aAAa;oBACT,aAAa,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,YAAU,YAAY,CAAC,CAAC,GAAG,QAAQ,CAAC,UAAO,GAAA,CAAC;yBAC9D,IAAI,CAAC,IAAI,CAAC,CAAC;aACrB;SACF;QAED,IAAI,qBAAqB,GAAG,EAAE,CAAC;QAC/B,IAAI,OAAO,GAAG,CAAC,IAAI,MAAM,GAAG,CAAC,EAAE;YAC7B,qBAAqB,GAAG,QAAQ,CAAC;SAClC;aAAM;YACL,IAAI,OAAO,GAAG,CAAC,EAAE;gBACf,IAAM,UAAU,GAAG,iBAAiB,CAAC,MAAM,CAAC,CAAC;gBAC7C,IAAM,YAAY,GACd,SAAS,CAAC,KAAK,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,YAAU,YAAY,CAAC,CAAC,GAAG,QAAQ,CAAG,GAAA,CAAC;qBAChE,IAAI,CAAC,IAAI,CAAC,CAAC;gBACpB,qBAAqB,GAAM,UAAU,SAAI,YAAY,MAAG,CAAC;aAC1D;iBAAM;gBACL,qBAAqB,GAAG,QAAQ,CAAC;aAClC;SACF;QAED,IAAM,QAAQ,GACV,eAAY,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,WAAO,CAAC;QAC1E,IAAM,OAAO,GAAM,MAAM,MAAG,CAAC;QAC7B,IAAI,MAAM,EAAE;YACV,OAAO,gBACA,QAAQ,gHAET,aAAa,yBACN,OAAO,2BAAsB,OAAO,SAC7C,qBAAqB,UAAK,QAAQ,qCAG/B,QAAQ,0BAAqB,IAAI,kEAElC,aAAa,yBACN,OAAO,2BAAsB,OAAO,SAC7C,qBAAqB,UAAK,QAAQ,2BAErC,CAAC;SACH;QAED,OAAO,cACA,QAAQ,sGAET,aAAa,2BACF,OAAO,2BAAsB,OAAO,SACjD,qBAAqB,UAAK,QAAQ,8BAG/B,QAAQ,0BAAqB,IAAI,wDAElC,aAAa,2BACF,OAAO,2BAAsB,OAAO,SACjD,qBAAqB,UAAK,QAAQ,oBAErC,CAAC;IACJ,CAAC;IAED;;;;aAIgB,sBAAsB,CAClC,QAAkB,EAClB,cAAyD;QAEpD,IAAA,oBAAC,EAAE,qBAAM,EAAN,2BAAM,EAAE,qBAAM,EAAN,2BAAM,CAAmB;QAE3C,IAAM,OAAO,GAAG,QAAQ,CAAC,MAAM,CAAC;QAChC,IAAI,CAAC,CAAC,MAAM,KAAK,OAAO,EAAE;YACxB,IAAM,OAAK,GAAG,iBAAiB,CAAC,OAAO,CAAC,CAAC;YACzC,IAAM,SAAO,GAAG,6BAA2B,OAAK,6GAI/C,CAAC;YACF,OAAO,CAAC,SAAO,EAAE,OAAO,CAAC,CAAC;SAC3B;QAED,IAAI,mBAAmB,GAAG,EAAE,CAAC;QAC7B,IAAM,IAAI,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;QAEvB,IAAI,IAAI,GAAG,CAAC,CAAC;QAEb,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACpC,IAAM,GAAG,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;YAEpB,IAAI,GAAG,CAAC,MAAM,KAAK,CAAC,EAAE;gBACpB,SAAS;aACV;YAED,IAAI,IAAI,GAAG,CAAC,MAAM,CAAC;YAEnB,IAAI,GAAG,CAAC,MAAM,KAAK,CAAC,EAAE;gBACpB,mBAAmB,IAAI,UAAQ,GAAG,CAAC,CAAC,CAAC,wBAAmB,CAAC,QAAK,CAAC;aAChE;iBAAM;gBACL,IAAM,OAAO,GAAG,0BAA0B,CAAC,GAAG,EAAE,mBAAmB,CAAC,CAAC;gBACrE,mBAAmB,IAAI,cAAY,CAAC,wBAAmB,CAAC,QAAK,CAAC;gBAC9D,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;oBACvC,mBAAmB,IAAI,UAAQ,GAAG,CAAC,CAAC,CAAC,gBAAW,CAAC,WAAM,OAAO,CAAC,CAAC,CAAC,MAAG,CAAC;oBAErE,IAAI,CAAC,KAAK,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE;wBAC5B,mBAAmB,IAAI,UAAQ,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,QAAK;6BAC1C,UAAQ,CAAC,YAAO,GAAG,CAAC,CAAC,CAAC,WAAM,OAAO,CAAC,CAAC,CAAC,MAAG,CAAA,CAAC;qBAC/C;yBAAM;wBACL,mBAAmB;4BACf,UAAQ,CAAC,gBAAW,CAAC,YAAO,GAAG,CAAC,CAAC,CAAC,WAAM,OAAO,CAAC,CAAC,CAAC,MAAG,CAAC;qBAC3D;iBACF;aACF;SACF;QAED,IAAM,UAAU,GAAG,EAAE,CAAC;QACtB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE;YAC7B,UAAU,CAAC,IAAI,CAAC,MAAI,CAAG,CAAC,CAAC;SAC1B;QAED,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,CAAC;QACtC,IAAI,OAAO,GAAG,6BAA2B,KAAK,gBAC1C,mBAAmB,SACtB,CAAC;QACF,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;YAC3B,OAAO,IAAI,YAAU,KAAK,WAAQ,CAAC;SACpC;aAAM;YACL,OAAO,IAAI,YAAU,KAAK,SAAI,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,SAAM,CAAC;SAC1D;QAED,OA
AO,CAAC,OAAO,EAAE,IAAI,CAAC,CAAC;IACzB,CAAC;IAED;;;;;IAKA,SAAS,yBAAyB,CAAC,KAAe;QAChD,IAAM,IAAI,GAAG,KAAK,CAAC,MAAM,CAAC;QAE1B,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,OAAO,6DAA6D,CAAC;SACtE;QAED,IAAM,OAAO,GAAGD,OAAI,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC;QAC3C,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,CAAC;QAEtC,IAAM,MAAM,GAAa,EAAE,CAAC;QAC5B,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE;YAC7B,MAAM,CAAC,IAAI,CAAC,MAAI,CAAG,CAAC,CAAC;SACtB;QAED,IAAI,OAAO,CAAC,MAAM,KAAK,CAAC,EAAE;YACxB,OAAO,iMAGL,CAAC;SACJ;QACD,IAAI,OAAO,CAAC;QACZ,OAAO,GAAG,qBAAqB;YAC3B,OAAO;iBACF,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC;gBACR,IAAM,KAAK,GACP,SAAO,MAAM,CAAC,CAAC,CAAC,6CACZ,YAAY,CAAC,CAAC,CAAG,CAAC;gBAC1B,IAAM,KAAK,GAAG,CAAC,KAAK,OAAO,CAAC,MAAM,GAAG,CAAC;oBAClC,SAAO,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,oBAChB,MAAM,CAAC,CAAC,CAAC,oCAA+B,YAAY,CAAC,CAAC,CAAG;oBAC7D,uBAAqB,MAAM,CAAC,CAAC,CAAC,oCAC1B,YAAY,CAAC,CAAC,CAAG,CAAC;gBAC1B,OAAU,KAAK,UAAK,KAAK,MAAG,CAAC;aAC9B,CAAC;iBACD,IAAI,CAAC,EAAE,CAAC,CAAC;QAElB,OAAO,iDACmC,KAAK,kBACzC,OAAO,uBACA,KAAK,SAAI,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,kBAErC,CAAC;IACJ;;IC9uBA,IAAM,YAAY,GAAG,UAAC,GAAa;QACjC,IAAI,OAAO,GAAG,CAAC,CAAC;QAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACnC,OAAO,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC;SACnB;QACD,OAAO,OAAO,CAAC;IACjB,CAAC,CAAC;aAEc,uBAAuB,CACnC,QAAkB,EAAE,KAAe;QACrC,IAAI,QAAQ,CAAC,MAAM,KAAK,KAAK,CAAC,MAAM,EAAE;YACpC,MAAM,IAAI,KAAK,CACX,iCAA+B,QAAQ,CAAC,MAAQ;iBAChD,iCAA+B,KAAK,CAAC,MAAM,WAAQ,CAAA;gBACnD,sBAAsB,CAAC,CAAC;SAC7B;QACD,OAAO,KAAK,CAAC,KAAK,CACd,UAAC,GAAW,EAAE,MAAc,IAAK,OAAA,GAAG,GAAG,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC,GAAA,CAAC,CAAC;IACrE,CAAC;IAED;IACA;aACgB,eAAe,CAC3B,MAAiD,EAAE,WAAqB,EACxE,aAAmD,EACnD,iBACa;QAFb,8BAAA,EAAA,iBAA2C,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC;QACnD,kCAAA,EAAA,qBACK,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC;QACT,IAAA;;;;;;;;;aAYL,EAZM,iBAAS,EAAE,iBAAS,EAAE,iBAY5B,CAAC;QACF,OAAO,CAAC,SAAS,EAAE,SAAS,EAAE,SAAS,CAAC,CAAC;IAC3C,CAAC;aAEe,6BAA6B,CACzC,MAAiD,EACjD,WAAqB;QACvB,IAAM,IAAI,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,WAAW,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC,CAAC;QAC7D,IAAM,IAAI,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,WAAW,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC,CAAC;;;;;;;;;QAS7D,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,OAAO,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;SACnB;QACD,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,OAAO,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;SACnB;QAED,OAAO,CAAC,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;IACrB,CAAC;aAEe,6BAA6B,CACzC,SAAiB,EAAE,QAAgB,EACnC,SAAiB;;;;;;;;QAQnB,IAAI,SAAS,KAAK,CAAC,EAAE;YACnB,OAAO,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;SACnB;aAAM,IAAI,SAAS,KAAK,CAAC,EAAE;YAC1B,OAAO,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;SACnB;QAED,OAAO,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;IACnB,CAAC;aAEe,6BAA6B,CACzC,MAAiD,EACjD,WAAqB;QACvB,IAAM,IAAI,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,WAAW,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC,CAAC;QAC7D,IAAM,IAAI,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,WAAW,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC,CAAC;;;;QAI7D,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,OAAO,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;SAClB;QACD,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,OAAO,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;SAClB;QAED,OAAO,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;IACnB,CAAC;aAEe,kBAAkB,CAAC,KAAe;QAChD,OAAO,EAAC,CAAC,EAAE,KAAK,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,CAAC,GAAA,CAAC,EAAC,CAAC;IACrC,CAAC;aAEe,kBAAkB,CAAC,KAAe;QAChD,IAAI,KAAK,KAAK,SAAS,IAAI,KAAK,KAAK,OAAO,IAAI,KAAK,KAAK,MAAM;YAC5D,KAAK,KAAK,QAAQ,EAAE;YACtB,OAAO,CAAC,CAAC;SACV;aAAM,IAAI,KAAK,KAAK,WAAW,EAAE;YAChC,
OAAO,CAAC,CAAC;SACV;aAAM;YACL,MAAM,IAAI,KAAK,CAAC,mBAAiB,KAAO,CAAC,CAAC;SAC3C;IACH,CAAC;aAEe,uBAAuB,CAAC,IAAiB,EAAE,KAAe;QACxE,IAAI,KAAK,KAAK,SAAS,EAAE;YACvB,OAAO,IAAI,YAAY,CAAC,IAAI,CAAC,CAAC;SAC/B;aAAM,IAAI,KAAK,KAAK,OAAO,EAAE;YAC5B,OAAO,IAAI,UAAU,CAAC,IAAI,CAAC,CAAC;SAC7B;aAAM,IAAI,KAAK,KAAK,MAAM,IAAI,KAAK,KAAK,QAAQ,EAAE;YACjD,OAAO,UAAU,CAAC,IAAI,CAAC,IAAI,UAAU,CAAC,IAAI,CAAC,CAAC,CAAC;SAC9C;aAAM;YACL,MAAM,IAAI,KAAK,CAAC,mBAAiB,KAAO,CAAC,CAAC;SAC3C;IACH,CAAC;aAEe,iBAAiB;QAC/B,OAAO,CAAC,CAAC,OAAO,MAAM,KAAK,WAAW;;aAEnC,OAAO,iBAAiB,KAAK,WAAW,CAAC,KAAK,CAAC,CAAC,SAAS,CAAC,GAAG,CAAC;IACnE;;;;;;;;;;;;;;;aChIgB,0BAA0B,CACtC,aAAuB,EAAE,UAAkB,EAAE,UAAkB,EAC/D,SAAiB;QACnBA,OAAI,CAAC,MAAM,CACP,SAAS,GAAG,CAAC,KAAK,CAAC,IAAI,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,EAC7C,cAAM,OAAA,8DAA8D,GAAA,CAAC,CAAC;QAC1E,OAAO,yDAEH,SAAS,GAAG,aAAa,CAAC,CAAC,CAAC,WAAM,UAAU,8DAE5C,UAAU,GAAG,aAAa,CAAC,CAAC,CAAC,WAAM,SAAS,mCAE3B,aAAa,CAAC,CAAC,CAAC,gCAChB,aAAa,CAAC,CAAC,CAAC,6BACnB,SAAS,eAEzB,mBAAmB,EAAE,+BAEL,UAAU,KAAK,CAAC,GAAG,GAAG,GAAG,+BAA+B,sEAItE,UAAU,KAAK,CAAC,GAAG,GAAG,GAAG,gCAAgC,+pEAsD3D,CAAC;IACL,CAAC;IAED;QAsBE,iCACI,MAAgC,EAAE,WAAqC,EACvE,YAAoB,EAAE,cAAuB,EAAE,cAAuB,EACtE,IAAuB,EAAE,UAA0C,EACnE,sBAAyC;;YADzC,qBAAA,EAAA,WAAuB;YAAE,2BAAA,EAAA,iBAA0C;YACnE,uCAAA,EAAA,6BAAyC;YArB7C,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GAAG,mDAAmD,CAAC;YAC/D,kBAAa,GAA6B,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAEpD,WAAM,GAAG,IAAI,CAAC;YAkBZ,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;;YAE/C,IAAI,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;gBACxB,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;aACpC;iBAAM;gBACL,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;aACpC;YACD,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,IAAI,CAAC,iBAAiB,CAAC,CAAC;YAE5B,IAAM,OAAO,GAAG,IAAI,IAAI,IAAI,CAAC;YAC7B,IAAM,yBAAyB,GAAG,sBAAsB,IAAI,IAAI,CAAC;YACjE,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YAED,IAAI,yBAAyB,EAAE;gBAC7B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,UAAU,GAAG,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC;gBAClC,CAAC;gBACD,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC;YACtD,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC;YACpE,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,UAAU,CAAC;YAEjC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAC3D,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;YACrC,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;YAErC,kCAA2C,EAA1C,iBAAS,EAAE,iBAAS,CAAuB;YAE5C,IAAI,CAAC,SAAS,GAAG,sBAAoB,IAAI,CAAC,UAAU,SAAI,IAAI,CAAC,IAAI,SAC7D,IAAI,CAAC,IAAI,SAAI,IAAI,CAAC,iBAAiB,SAAI,IAAI,CAAC,cAAc,SAC1D,IAAI,CAAC,cAAgB,CAAC;SAC3B;QAED,6CAAW,GAAX;YACE,IAAM,QAAQ,GAAG,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;YAChC,IAAM,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;YACtC,IAAM,MAAM,GAAG,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;YAE1D,IAAM,SAAS,GAAG,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,SAAS,CAAC,CAAC;YACpD,IAAM,SAAS,GAAG,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,UAAU,CAAC,CAAC;YACpD,OAAO;gBACL,uBAAuB,CAAC,SAAS,EAAE,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;gBACxD,uBAAuB,CAAC,SAAS,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aACpD,CAAC;SACH;QAED,6CAAW,GAAX;YACE,IAAM,OAAO,GAAG,IAAI,CAAC,IAAI;gBACrB,kEAAkE;gBAClE,6NAGsB,CAAC;YAE3B,IAAM,OAAO,GAAG,IAAI,CAAC,IAAI;gBACrB,mEAAmE;gBACnE,8NAGsB,CAAC;YAE3B,I
AAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GACd,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC;gBAC/D,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,wKAEQ,YAAY,wBACd,CAAC;iBACZ;qBAAM;oBACL,iBAAiB,GAAG,oGAEZ,YAAY,oBACd,CAAC;iBACR;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YACD,IAAM,cAAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,aACb,iBAAiB,kGAGjB,IAAI,CAAC,cAAc,GAAG,qEAGrB;gBACqB,6HAGrB,qBAEC,OAAO,8GAKT,IAAI,CAAC,cAAc,GAAG,uEAGnB;gBACmB,4HAGtB,mBACE,OAAO,0UASL,cAAc,oBACd,sBAAsB,gHAK1B,0BAA0B,CACtB,IAAI,CAAC,iBAAiB,EAAE,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,UAAU,EACxD,IAAI,CAAC,SAAS,CAAC,WACtB,CAAC;YAEF,OAAO,QAAQ,CAAC;SACjB;sCACF;KAAA;;aCtPe,sBAAsB,CAClC,aAAuB,EAAE,aAAuC;QAClE,IAAM,UAAU,GAAG,aAAa,CAAC,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,CAAC,CAAC;QACvD,IAAM,UAAU,GAAG,aAAa,CAAC,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,CAAC,CAAC;QACvD,IAAM,SAAS,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,CAAC;QACpE,OAAO,qDACuC,SAAS,WAAM,UAAU,0DACzB,UAAU,WAAM,SAAS,gBACnE,mBAAmB,EAAE,+CACY,aAAa,CAAC,CAAC,CAAC,gDAChB,aAAa,CAAC,CAAC,CAAC,qDAEb,aAAa,CAAC,CAAC,CAAC,mDAChB,aAAa,CAAC,CAAC,CAAC,4DAET,SAAS,kDAEvB,aAAa,CAAC,CAAC,CAAC,WAAM,aAAa,CAAC,CAAC,CAAC,qEAExC,aAAa,CAAC,CAAC,CAAC,2HAI3C,aAAa,CAAC,CAAC,CAAC,gFAEhB,aAAa,CAAC,CAAC,CAAC,gIAKM,SAAS,WAAM,aAAa,CAAC,CAAC,CAAC,2FAE/B,SAAS,WAAM,aAAa,CAAC,CAAC,CAAC,uPAOrD,aAAa,CAAC,CAAC,CAAC,gVAOA,SAAS,4OAMzB,aAAa,CAAC,CAAC,CAAC,gNAKF,SAAS,+MAQC,SAAS,8DACC,aAAa,CAAC,CAAC,CAAC,sJAKlD,aAAa,CAAC,CAAC,CAAC,2IAGhB,aAAa,CAAC,CAAC,CAAC,+PAUhB,aAAa,CAAC,CAAC,CAAC,gFAEhB,aAAa,CAAC,CAAC,CAAC,6VAWnB,CAAC;IACJ,CAAC;aAEe,sBAAsB,CAAC,aAAuC;QAE5E,OAAO,0BACY,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,yDACO,aAAa,CAAC,CAAC,CAAC,kBAE1D,mBAAmB,EAAE,0/CAuCxB,CAAC;IACJ,CAAC;IAED;QAoBE,6BACI,MAAgC,EAAE,WAAqC,EACvE,aAAqB,EAAE,cAAuB,EAAE,cAAuB,EACvE,UAAkB,EAAE,UAAkB,EAAE,IAAuB,EAC/D,UAA0C,EAC1C,sBAAyC;;YAFzC,2BAAA,EAAA,kBAAkB;YAAE,2BAAA,EAAA,kBAAkB;YAAE,qBAAA,EAAA,WAAuB;YAC/D,2BAAA,EAAA,iBAA0C;YAC1C,uCAAA,EAAA,6BAAyC;YAnB7C,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GAAG,mDAAmD,CAAC;YAC/D,kBAAa,GAA6B,CAAC,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;YAkBpD,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YAC/C,IAAM,QAAQ,GAAG,UAAU,GAAG,MAAM,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;YACpD,IAAI,CAAC,aAAa;gBACd,6BAA6B,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,QAAQ,EAAE,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;YAC5E,IAAI,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;gBAChD,aAAa,GAAG,CAAC,CAAC;aACnB;YACD,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,aAAa,EAAE,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC;;;;;;YAMvC,IAAIA,OAAI,CAAC,WAAW,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE;gBAC9C,aAAa,GAAG,CAAC,CAAC;gBAClB,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,aAAa,EAAE,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC;aACxC;YACD,IAAM,OAAO,GAAG,IAAI,IAAI,IAAI,CAAC;YAC7B,IAAM,yBAAyB,GAAG,sBAAsB,IAAI,IAAI,CAAC;YACjE,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YAED,IAAI,yBAAyB,EAAE;gBAC7B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,aAAa,GAAG,aAAa,CAAC;YACnC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAC3D,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;YACrC,IAAI,CAAC,cAAc,G
AAG,cAAc,CAAC;YAErC,IAAM,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;YACtC,IAAM,MAAM,GAAG,IAAI,CAAC,UAAU;gBAC1B,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,QAAQ,CAAC;gBAC1C,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;YAE/C,wCAAiD,EAAhD,iBAAS,EAAE,iBAAS,CAA6B;YAClD,IAAI,CAAC,SAAS,GAAG,kBAAgB,IAAI,CAAC,aAAa,SAAI,UAAU,SAC7D,UAAU,SAAI,IAAI,CAAC,UAAU,SAAI,IAAI,CAAC,IAAI,SAAI,IAAI,CAAC,IAAI,UACvD,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,UAAI,IAAI,CAAC,cAAc,SAAI,IAAI,CAAC,cAAgB,CAAC;SAC7E;QAED,yCAAW,GAAX,UAAY,MAAgB;YAC1B,IAAM,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC;YAC9D,IAAM,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC;YAC9D,IAAI,SAAS,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,CAAC;YAClE,IAAI,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;gBAC7B,SAAS,IAAI,CAAC,CAAC;aAChB;YACDA,OAAI,CAAC,MAAM,CACP,SAAS,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC;gBACnC,SAAS,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,EAC3C,cAAM,OAAA,gDAAgD;gBAClD,qBAAqB,GAAA,CAAC,CAAC;YAC/B,IAAM,SAAS,GAAG,CAAC,UAAU,EAAE,SAAS,CAAC,CAAC;YAC1C,IAAM,SAAS,GAAG,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;YAE1C,OAAO;gBACL,uBAAuB,CAAC,SAAS,EAAE,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;gBACxD,uBAAuB,CAAC,SAAS,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aACpD,CAAC;SACH;QAED,yCAAW,GAAX;YACE,IAAI,OAAO,CAAC;YAEZ,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE;gBAC7B,OAAO,GAAG,IAAI,CAAC,IAAI;oBACf,+DAA+D;oBAC/D,iNAGa,CAAC;aACnB;iBAAM;gBACL,OAAO,GAAG,IAAI,CAAC,IAAI;oBACf,gEAAgE;oBAChE,iNAGa,CAAC;aACnB;YAED,IAAI,OAAO,CAAC;YACZ,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE;gBAC7B,OAAO,GAAG,IAAI,CAAC,IAAI;oBACf,gEAAgE;oBAChE,kNAGa,CAAC;aACnB;iBAAM;gBACL,OAAO,GAAG,IAAI,CAAC,IAAI;oBACf,+DAA+D;oBAC/D,iNAGa,CAAC;aACnB;YAED,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GAAG,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,KAAK,CAAC,CAAC;gBAC1E,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,sJAEK,YAAY,oBACf,CAAC;iBACR;qBAAM;oBACL,iBAAiB,GAAG,4FAEV,YAAY,oCAEjB,CAAC;iBACP;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YAED,IAAM,cAAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,aACb,iBAAiB,8FAIjB,IAAI,CAAC,cAAc,GAAG,iEAGrB;gBACqB,qHAGrB,mBACC,OAAO,uGAKT,IAAI,CAAC,cAAc,GAAG,iEAGrB;gBACqB,qHAGrB,mBACC,OAAO,qOAOP,cAAc,kBACd,sBAAsB,8EAIxB,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC;gBACnB,sBAAsB,CAClB,CAAC,IAAI,CAAC,aAAa,EAAE,IAAI,CAAC,aAAa,EAAE,CAAC,CAAC,EAC3C,IAAI,CAAC,aAAa,CAAC;gBACvB,sBAAsB,CAAC,IAAI,CAAC,aAAa,CAAC,YACjD,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;kCACF;KAAA;;ICzXD;;;;;;;;;;;;;;;;aAwBgB,sBAAsB;QACpC,OAAO,uEAEH,mBAAmB,EAAE,g5BA6BxB,CAAC;IACJ,CAAC;IAED;QAgBE,6BACI,WAAqC,EAAE,cAAuB,EAC9D,cAAuB,EAAE,UAAkB,EAAE,UAAkB,EAC/D,IAAuB,EAAE,UAA0C,EACnE,sBAAyC;YAFhB,2BAAA,EAAA,kBAAkB;YAAE,2BAAA,EAAA,kBAAkB;YAC/D,qBAAA,EAAA,WAAuB;YAAE,2BAAA,EAAA,iBAA0C;YACnE,uCAAA,EAAA,6BAAyC;YAf7C,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GAAG,mDAAmD,CAAC;YAC/D,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAcpD,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YACjD,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAM,OAAO,GAAG,IAAI,IAAI,IAAI,CAAC;YAC7B,IAAM,yBAAyB,GAAG,sBAAsB,IAAI,IAAI,CAAC;YACjE,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YAED,IAAI,yBAAyB,EAAE;gBAC7B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,OA
AO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAC3D,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;YACrC,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;YACrC,IAAI,CAAC,SAAS,GAAG,kBAAgB,IAAI,CAAC,UAAU,SAAI,UAAU,SAC1D,UAAU,SAAI,IAAI,CAAC,cAAc,SAAI,IAAI,CAAC,cAAgB,CAAC;SAChE;QAED,yCAAW,GAAX;YACE,IAAI,OAAO,CAAC;YACZ,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE;gBAC7B,OAAO;oBACH,oEAAoE,CAAC;aAC1E;iBAAM;gBACL,OAAO;oBACH,qEAAqE,CAAC;aAC3E;YAED,IAAI,OAAO,CAAC;YACZ,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE;gBAC7B,OAAO;oBACH,qEAAqE,CAAC;aAC3E;iBAAM;gBACL,OAAO;oBACH,oEAAoE,CAAC;aAC1E;YAED,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GAAG,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,KAAK,CAAC,CAAC;gBAC1E,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,sJAEK,YAAY,oBACf,CAAC;iBACR;qBAAM;oBACL,iBAAiB,GAAG,4FAEV,YAAY,oCAEjB,CAAC;iBACP;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YAED,IAAM,cAAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,aACb,iBAAiB,qFAIjB,IAAI,CAAC,cAAc,GAAG,uEAGnB;gBACmB,mHAGnB,mBACD,OAAO,8FAKT,IAAI,CAAC,cAAc,GAAG,uEAGnB;gBACmB,mHAGnB,mBACD,OAAO,qLAMP,cAAc,kBACd,sBAAsB,6EAGxB,sBAAsB,EAAE,WAC3B,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;kCACF;KAAA;;IC5LD;;;;;;;;;;;;;;;;aAuBgB,+BAA+B,CAC3C,aAAuC;QACzC,IAAM,UAAU,GAAG,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;QACxC,IAAM,UAAU,GAAG,aAAa,CAAC,CAAC,CAAC,CAAC;QACpC,IAAM,SAAS,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,CAAC;QACpE,OAAO,oDACsC,SAAS,WAAM,UAAU,yDACzB,UAAU,WAAM,SAAS,yDACzB,SAAS,WAAM,UAAU,yDACzB,UAAU,WAAM,SAAS,uaAQpE,mBAAmB,EAAE,+PAOsB,SAAS,8LAOhC,UAAU,wSAKI,SAAS,kIAET,SAAS,4DAGvB,UAAU,wSAKI,SAAS,kIAET,SAAS,iHAGf,SAAS,2DACJ,UAAU,wTAcvB,UAAU,wSAKI,SAAS,kIAET,SAAS,iHAGf,SAAS,2DACJ,UAAU,mRAUY,UAAU,8BAChD,UAAU,8FAI5B,CAAC;IACJ,CAAC;IAED;QAcE,sCACI,MAAgC,EAAE,MAAgC,EAClE,WAAqC,EAAE,IAAuB,EAC9D,UAA0C,EAC1C,sBAAyC;YAFF,qBAAA,EAAA,WAAuB;YAC9D,2BAAA,EAAA,iBAA0C;YAC1C,uCAAA,EAAA,6BAAyC;YAb7C,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GAAG,mDAAmD,CAAC;YAC/D,kBAAa,GAA6B,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;YAYnDA,OAAI,CAAC,MAAM,CACP,MAAM,CAAC,CAAC,CAAC,IAAI,EAAE,IAAI,MAAM,CAAC,CAAC,CAAC,IAAI,EAAE,EAClC,cACI,OAAA,kEAAkE,GAAA,CAAC,CAAC;YAE5E,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAE/B,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YAC/C,IAAI,CAAC,QAAQ,GAAG;gBACd,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC;gBACjD,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC,CAAC;aACtE,CAAC;YAEF,IAAM,OAAO,GAAG,IAAI,IAAI,IAAI,CAAC;YAC7B,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YAED,IAAM,yBAAyB,GAAG,sBAAsB,IAAI,IAAI,CAAC;YACjE,IAAI,yBAAyB,EAAE;gBAC7B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAC3D,IAAI,CAAC,cAAc,GAAG,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;YACtC,IAAI,CAAC,cAAc,GAAG,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;YACtC,IAAI,CAAC,SAAS,GAAG,2BAAyB,IAAI,CAAC,UAAU,SACrD,IAAI,CAAC,cAAc,SAAI,IAAI,CAAC,cAAgB,CAAC;SAClD;QAED,kDAAW,GAAX;YACE,IAAM,OAAO,GACT,yMAGY,CAAC;YAEjB,IAAM,OAAO,GACT,6MAGa,CAAC;YAElB,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GAAG,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,KAAK,CAAC,CAAC;gBAC1E,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,gJAEE,YAAY,oBACZ,CAAC;iBACR;qBAAM;oBACL,iBAAiB;wBACb,wEACE,YAAY,gBAChB,CAAC;iBACJ;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YAED,IAAM,c
AAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,aACb,iBAAiB,8FAIjB,IAAI,CAAC,cAAc,GAAG,uEAGnB;gBACmB,2HAGnB,mBACD,OAAO,qGAIT,IAAI,CAAC,cAAc,GAAG,uEAGnB;gBACmB,2HAGnB,mBACD,OAAO,oVAOL,cAAc,oBACd,sBAAsB,0FAI1B,+BAA+B,CAAC,IAAI,CAAC,aAAa,CAAC,WACtD,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;2CACF;KAAA;;ICrPD;;;;;;;;;;;;;;;;aAqBgB,OAAO,CACnB,IAA0E;QAErE,IAAA,oBAAM,EAAE,kBAAK,CAAS;QACtB,IAAA,YAAC,CAAW;QACZ,IAAA,mBAAK,CAAU;QAEtB,IAAM,KAAK,GAAGA,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;QAC1C,IAAM,MAAM,GAAGA,OAAI,CAAC,sBAAsB,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;QACzD,IAAM,MAAM,GAAGA,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;QAE1CA,OAAI,CAAC,MAAM,CACP,KAAK,KAAK,MAAM,EAChB,cAAM,OAAA,oBAAkB,MAAM,cAAS,MAAM,2BAAwB;aACjE,YAAU,CAAC,CAAC,KAAK,cAAS,KAAK,sCAAmC,CAAA;YAClE,8CAA8C,GAAA,CAAC,CAAC;;QAGxD,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;QAC9B,OAAO,EAAC,MAAM,EAAE,CAAC,CAAC,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAC,CAAC;IAC3D,CAAC;IAEM,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEE,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;aCPe,eAAe,CAAC,EAUZ;;YATlB,QAAC,EACD,QAAC,EACD,0BAAU,EACV,0BAAU,EACV,oBAAO,EACP,YAAW,EAAX,gCAAW,EACX,8BAA6B,EAA7B,kDAA6B,EAC7B,sBAAkB,EAAlB,uCAAkB,EAClB,kBAAiB,EAAjB,sCAAiB;QAEjB,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAC7B,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAE7B,IAAM,WAAW,GAAG,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;QACzE,IAAM,WAAW,GAAG,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;QAEzE,IAAM,WAAW,GAAG,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;QACzE,IAAM,WAAW,GAAG,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;QAEzE,IAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QACxC,IAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QAExC,IAAM,SAAS,GAAGF,OAAI,CAAC,aAAa,CAAC,UAAU,CAAC,CAAC;QACjD,IAAM,SAAS,GAAGA,OAAI,CAAC,aAAa,CAAC,UAAU,CAAC,CAAC;QAEjD,IAAM,iBAAiB,GAAGG,iBAAc,CAAC,0BAA0B,CAC/D,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;QAChD,IAAM,QAAQ,GAAG,iBAAiB,CAAC,MAAM,CAAC,CAAC,WAAW,EAAE,WAAW,CAAC,CAAC,CAAC;QAEtEH,OAAI,CAAC,MAAM,CACP,WAAW,KAAK,WAAW,EAC3B,cAAM,OAAA,oCAAkC,WAAW,YAAS;aACrD,WAAW,iCAA4B,CAAC,CAAC,KAAK,UAAO,CAAA;aACrD,CAAC,CAAC,KAAK,wBAAmB,UAAY,CAAA;aACzC,qBAAmB,UAAU,iBAAc,CAAA,GAAA,CAAC,CAAC;QAErD,IAAM,QAAQ,GAA6B,UAAU;YACjD,CAAC,SAAS,EAAE,WAAW,EAAE,WAAW,CAAC;YACrC,CAAC,SAAS,EAAE,WAAW,EAAE,WAAW,CAAC,CAAC;QAC1C,IAAM,QAAQ,GAA6B,UAAU;YACjD,CAAC,SAAS,EAAE,WAAW,EAAE,WAAW,CAAC;YACrC,CAAC,SAAS,EAAE,WAAW,EAAE,WAAW,CAAC,CAAC;;QAG1C,IAAM,GAAG,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QACzE,IAAM,GAAG,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QACzE,IAAM,aAAa,GAAiB,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;QAE/C,IAAM,QAAQ,GAAG,IAAI,CAAC,GAAG,CAAC,SAAS,EAAE,SAAS,CAAC,CAAC;QAChD,IAAM,cAAc,GAAG,SAAS,KAAK,CAAC,CAAC;QACvC,IAAM,cAAc,GAAG,SAAS,KAAK,CAAC,CAAC;QACvC,IAAM,OAAO,GAAG,WAAW,GAAG,CAAC,KAAK,CAAC,IAAI,WAAW,GAAG,CAAC,KAAK,CAAC;YAC1D,CAAC,UAAU,IAAI,CAAC,UAAU,CAAC;QAC/B,IAAI,OAAsB,CAAC;QAC3B,IAAI,WAAW,GAAG,WAAW,IAAI,EAAE,EAAE;YACnC,OAAO,GAAG,IAAI,mBAAmB,CAC7B,CAAC,QAAQ,EAAE,WAAW,EAAE,WAAW,CAAC,EAAE,cAAc,EAAE,cAAc,EACpE,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,U
AAU,EAAE,sBAAsB,CAAC,CAAC;SAEvE;;;;;;;;;;QASG,IAAI,CAAC,UAAU,IAAI,CAAC,UAAU;aACzB,CAAC,WAAW,IAAI,EAAE;iBAChB,WAAW,IAAI,GAAG,IAAI,WAAW,IAAI,CAAC,GAAG,WAAW,CAAC;iBACtD,WAAW,IAAI,EAAE;qBAChB,WAAW,IAAI,GAAG,IAAI,WAAW,IAAI,CAAC,GAAG,WAAW,CAAC,CAAC,CAAC,EAAE;YAClE,OAAO,GAAG,IAAI,4BAA4B,CACtC,QAAQ,EAAE,QAAQ,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,WAAW,CAAC,EAAE,IAAI,EAC9D,UAAU,EAAE,sBAAsB,CAAC,CAAC;SACzC;aAAM,IAAI,OAAO,EAAE;;;;YAIlB,OAAO,GAAG,IAAI,uBAAuB,CACjC,QAAQ,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,WAAW,CAAC,EAC9CD,MAAG,EAAE,CAAC,GAAG,CAAC,+BAA+B,CAAW,EAAE,cAAc,EACpE,cAAc,EAAE,IAAI,EAAE,UAAU,EAAE,sBAAsB,CAAC,CAAC;SAC/D;aAAM;YACL,OAAO,GAAG,IAAI,mBAAmB,CAC7B,QAAQ,EAAE,CAAC,QAAQ,EAAE,WAAW,EAAE,WAAW,CAAC,EAC9CA,MAAG,EAAE,CAAC,GAAG,CAAC,+BAA+B,CAAW,EAAE,cAAc,EACpE,cAAc,EAAE,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,UAAU,EACxD,sBAAsB,CAAC,CAAC;SAC7B;QACD,IAAM,MAAM,GAAiB,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;QACxC,IAAI,IAAI,EAAE;YACR,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACnB;QACD,IAAI,sBAAsB,EAAE;YAC1B,MAAM,CAAC,IAAI,CAAC,sBAAsB,CAAC,CAAC;SACrC;QACD,IAAM,UAAU,GAAG;YACjB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,WAAW,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,WAAW,CAAC,EAAC;YAC1E,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,WAAW,CAAC,EAAC;SACrC,CAAC;QACF,IAAI,UAAU,KAAK,WAAW,EAAE;YAC9B,UAAU,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,cAAc,CAAC,EAAC,CAAC,CAAC;YAC3D,OAAO,CAAC,QAAQ,IAAI,eAAe,CAAC;SACrC;QACD,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;QAC3E,IAAM,WAAW,GACb,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QACnE,aAAa,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;;YACxB,KAAgB,IAAA,kBAAAK,SAAA,aAAa,CAAA,4CAAA,uEAAE;gBAA1B,IAAM,CAAC,0BAAA;gBACV,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;aAC/B;;;;;;;;;QACD,OAAO,WAAW,CAAC;IACrB;;IC3JA;;;;;;;;;;;;;;;;aAsBgB,YAAY,CAAC,IAI5B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,EAAE,YAAC,EAAE,kBAAI,EAAE,sDAAsB,CAAW;QAC7C,IAAA,6BAAU,EAAE,6BAAU,EAAE,6BAAU,EAAE,qCAAc,CAAU;QAEnE,OAAO,eAAe,CAAC;YACrB,CAAC,GAAA;YACD,CAAC,GAAA;YACD,UAAU,YAAA;YACV,UAAU,YAAA;YACV,OAAO,SAAA;YACP,IAAI,MAAA;YACJ,sBAAsB,wBAAA;YACtB,cAAc,gBAAA;YACd,UAAU,YAAA;SACX,CAAC,CAAC;IACL,CAAC;IAEM,IAAM,kBAAkB,GAAiB;QAC9C,UAAU,EAAEC,eAAY;QACxB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,YAAgC;KAC7C;;IChDD;;;;;;;;;;;;;;;;IAwBA;QAUE,gCAAY,EAAgB,EAAE,MAAgB,EAAE,MAAgB;YAThE,kBAAa,GAAG,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YAKrD,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAEtD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAGJ,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YAC3E,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,qBAAmB,EAAI,CAAC;YACzC,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;SACd;QAED,4CAAW,GAAX;YACE,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;YAChD,IAAM,QAAQ,GAAG,kHAGX,KAAK,2BAGP,iCAAiC,EAAE,yWAStC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;qCACF;KAAA;;IChED;;;;;;;;;;;;;;;;IAwBA;QAaE,+BACI,EAAgB,EAAE,MAAgB,EAAE,MAAgB,EACpD,oBAA6B;YAVjC,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAM3B,SAAI,GAAG,IAAI,CAAC;;;YAOV,IAAM,cAAc,GAAG,GAAG,CAAC;YAC3B,IAAI,CAAC,aAAa,GAAG,CAAC,cAAc,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAC5C,IAAI,CAAC,WAAW,GAAGA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YAC3E,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,iBAAiB,GAAG,oBAAoB,GAAG,MAAM,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;YACtE,IAAI,IAAI,CAAC,iBAAiB,GAAG,GAAG,EAAE;g
BAChC,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC;aACxB;iBAAM,IAAI,IAAI,CAAC,iBAAiB,GAAG,GAAG,EAAE;gBACvC,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC;aACxB;iBAAM;gBACL,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC;aACxB;YACD,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEhC,IAAI,CAAC,oBAAoB,GAAG,oBAAoB,CAAC;YACjD,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;;;YAGb,IAAI,CAAC,SAAS,GAAG,kBAAgB,EAAE,SAAI,IAAI,CAAC,iBAAiB,SACzD,IAAI,CAAC,oBAAsB,CAAC;SACjC;QAED,2CAAW,GAAX;YACE,IAAM,kBAAkB,GAAG,IAAI,CAAC,iBAAiB,GAAG,CAAC;gBACjD,aAAU,IAAI,CAAC,WAAW,CAAC,MAAM,GAAG,CAAC,OAAG;gBACxC,GAAG,CAAC;YACR,IAAM,iBAAiB,GAAG,IAAI,CAAC,oBAAoB;gBAC/C,qEACqB,kBAAkB,OAAI;gBAC3C,uBAAqB,kBAAkB,qDACF,CAAC;YAE1C,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;YAChD,IAAM,QAAQ,GAAG,wEAET,KAAK,mEAE+B,IAAI,CAAC,iBAAiB,oBAC5D,iCAAiC,EAAE,+QAMrC,IAAI,CAAC,iBAAiB,oCACtB,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,sDAErB,IAAI,CAAC,oBAAoB,GAAG,GAAG,GAAG,GAAG,oGAId,IAAI,CAAC,aAAa,4DACX,IAAI,CAAC,aAAa,uIAIxC,iBAAiB,yHAKxB,CAAC;YACN,OAAO,QAAQ,CAAC;SACjB;oCACF;KAAA;;IC5GD;;;;;;;;;;;;;;;;IAwBA;QAaE,6BAAY,EAAgB,EAAE,MAAgB,EAAE,MAAgB;YARhE,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,kBAAa,GAAG,CAAC,CAAC;YAElB,WAAM,GAAG,IAAI,CAAC;YAEd,SAAI,GAAG,IAAI,CAAC;;YAKV,IAAM,cAAc,GAAG,GAAG,CAAC;YAC3B,IAAI,CAAC,aAAa,GAAG,CAAC,cAAc,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAC5C,IAAI,CAAC,WAAW,GAAGA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YAC3E,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAChC,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;YACb,IAAI,CAAC,SAAS,GAAG,gBAAc,EAAI,CAAC;SACrC;QAED,yCAAW,GAAX;YACE,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,EAAE,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC;YACtD,IAAM,QAAQ,GAAG,sFAEX,KAAK,yBAEP,iCAAiC,EAAE,0NAOtC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;kCACF;KAAA;;IClED;;;;;;;;;;;;;;;;IAwBA;QAUE,yBAAY,EAAgB,EAAE,MAAgB,EAAE,MAAgB;YALhE,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAG3B,SAAI,GAAG,IAAI,CAAC;;YAIV,IAAM,cAAc,GAAG,GAAG,CAAC;YAC3B,IAAI,CAAC,aAAa,GAAG,CAAC,cAAc,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAC5C,IAAI,CAAC,WAAW,GAAGA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YAC3E,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAE3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,YAAU,EAAI,CAAC;YAChC,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;SACd;QAED,qCAAW,GAAX;YACE,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,EAAE,EAAE,KAAK,CAAC,CAAC;YAChD,IAAM,QAAQ,GAAG,oEAEX,KAAK,yBAEP,iCAAiC,EAAE,4NAOpC,CAAC;YACJ,OAAO,QAAQ,CAAC;SACjB;8BACF;KAAA;;IC/DD;;;;;;;;;;;;;;;;aAuBgB,gBAAgB,CAC5B,EAAgB,EAAE,MAAgB,EAAE,MAAgB;QACtD,IAAM,OAAO,GACTD,OAAI,CAAC,WAAW,CAAC,MAAM,EAAE,MAAM,CAAC,IAAIA,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;QAC7E,IAAI,OAAO,EAAE;YACX,OAAO,IAAI,mBAAmB,CAAC,EAAE,EAAE,MAAM,EAAE,MAAM,CAAC,CAAC;SACpD;QACD,IAAM,oBAAoB,GACtB,MAAM,CAAC,MAAM,KAAK,CAAC,IAAI,MAAM,CAAC,MAAM,GAAG,CAAC,IAAI,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC;QACjE,IAAM,oBAAoB,GACtB,MAAM,CAAC,MAAM,KAAK,CAAC,IAAI,MAAM,CAAC,MAAM,GAAG,CAAC,IAAI,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC;QACjE,IAAI,oBAAoB,IAAI,oBAAoB,EAAE;YAChD,OAAO,IAAI,qBAAqB,CAAC,EAAE,EAAE,MAAM,EAAE,MAAM,EAAE,oBAAoB,CAAC,CAAC;SAC5E;aAAM;YACL,OAAO,IAAI,eAAe,CAAC,EAAE,EAAE,MAAM,EAAE,MAAM,CAAC,CAAC;SAChD;IACH;;ICvCA;;;;;;;;;;;;;;;;aAoBgB,QAAQ,CACpB,IAAsD;QACjD,IAAA,oBAAM,CAAS;QACf,IAAA,YAAC,CAAW;QAEnB,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;QAC9B,OAAO,EAAC,MA
AM,EAAE,CAAC,CAAC,MAAM,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAC,CAAC;IAC5D,CAAC;IAEM,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAEM,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,QAA4B;KACzC;;ICjCD;;;;;;;;;;;;;;;;IAsBA;;;;;;;;aAQgB,OAAO,CAAC,IAAqD;QAEpE,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,kBAAI,EAAE,kBAAI,CAAW;QAE5B,IAAM,WAAW,GAAG,OAAO,CAAC,cAAc,CAAC,IAAI,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;QACpE,IAAM,OAAO,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;QAE1D,IAAM,cAAc,GAAG,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,IAAI,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;QAE9D,IAAM,cAAc,GAAG,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,IAAI,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;QAE9D,OAAO,CAAC,kBAAkB,GAAG,EAAC,IAAI,EAAE,cAAc,EAAE,IAAI,EAAE,cAAc,EAAC,CAAC;QAE1E,OAAO,WAAW,CAAC;IACrB,CAAC;IAEM,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;ICnDD;;;;;;;;;;;;;;;;IAuBA;QAWE,wBAAY,WAAqB,EAAE,EAAe;YANlD,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YAItB,SAAI,GAAG,IAAI,CAAC;;YAIV,IAAM,cAAc,GAAG,GAAG,CAAC;YAC3B,IAAI,CAAC,aAAa,GAAG,CAAC,cAAc,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAC5C,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;YACb,IAAI,CAAC,SAAS,GAAG,WAAS,EAAI,CAAC;SAChC;QAED,oCAAW,GAAX;YACE,OAAO,0DAED,gBAAgB,CAAC,IAAI,CAAC,EAAE,EAAE,KAAK,CAAC,yBAElC,iCAAiC,EAAE,2KAMpC,CAAC;SACL;6BACF;KAAA;;ICxBD;;;;;;;;;aASgB,eAAe,CAC3B,EAAqD;YAApD,kBAAM,EAAE,gCAAa,EAAE,gBAAK;QAC/B,OAAO,UAAC,EAAiB;gBAAhB,kBAAM,EAAE,oBAAO;YACf,IAAA,YAAC,CAA0B;YAClC,IAAM,aAAa,GAAG,OAAwB,CAAC;YAE/C,IAAM,MAAM,GAAG,KAAK,IAAI,CAAC,CAAC,KAAK,CAAC;YAChC,IAAI,aAAa,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,aAAa,IAAI,IAAI,EAAE;gBAClE,IAAM,KAAK,GAAG,aAAa,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;gBACpD,IAAM,SAAS,GAAG,aAAa,CAAC,KAAK,CAAC,MAAoB,EAAE,MAAM,CAAC,CAAC;gBACpE,OAAO,aAAa,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK,EAAE,MAAM,EAAE,SAAS,CAAC,CAAC;aACjE;YAED,IAAM,OAAO,GAAmB,IAAI,cAAc,CAAC,CAAC,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;YACpE,OAAO,aAAa,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC;SAC7D,CAAC;IACJ,CAAC;IASD;;;;;;;;;aASgB,gBAAgB,CAC5B,EAC0B;YADzB,wBAAS,EAAE,gCAAa,EAAE,uBAAuB,EAAvB,4CAAuB,EAAE,gBAAK;QAE3D,OAAO,UAAC,EAAiB;;gBAAhB,kBAAM,EAAE,oBAAO;YACf,IAAA,YAAC,EAAE,YAAC,CAA2B;YACtC,IAAM,aAAa,GAAG,OAAwB,CAAC;YAE/C,IAAI,eAAe,IAAI,CAAC,CAAC,KAAK,KAAK,WAAW,EAAE;gBAC9C,IAAM,KAAK,GAAG,aAAa,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;gBACpD,IAAM,KAAK,GAAG,aAAa,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;gBACpD,IAAI,IAAgB,SAAA,EAAE,IAAgB,SAAA,CAAC;gBACvC,IAAI,SAAS,KAAK,YAAY,CAAC,GAAG,EAAE;oBAClC;;;;;;;;;;;;;;;;;0BAqBE,EArBD,YAAI,EAAE,YAAI,CAqBR;iBACJ;qBAAM;oBACL,IAAM,WAAW,GAAG,IAAI,sBAAsB,CAC1C,YAAY,CAAC,qBAAqB,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;oBAC1D,IAAM,WAAW,GAAG,IAAI,sBAAsB,CAC1C,YAAY,CAAC,qBAAqB,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;oBAE1D,IAAM,QAAM,GAAG;wBACb;4BACE,MAAM,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM;4BAC5C,KAAK,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,KAAK;4BAC1C,KAAK,EAAE,CAAC,CAAC,KAAK;yBACf;wBACD;4BACE,MAAM,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM;4BAC5C,KAAK,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,KAAK;4BAC1C,KAAK,EAAE,CAAC,CAAC,KAAK;yBACf;wBACD;4BACE,MAAM,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM;4BAC5C,KAAK,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,KAAK;4BAC1C,KAAK,EAAE,CAAC,CAAC,KAAK;yBACf;wBACD;4BACE,MAAM,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM;4BAC5C,KAAK,EAAE,KAAK,CAAC,kBAAkB,CAAC,IAAI,CAAC,
KAAK;4BAC1C,KAAK,EAAE,CAAC,CAAC,KAAK;yBACf;qBACF,CAAC;oBAEF,IAAI,GAAG,aAAa,CAAC,gBAAgB,CAAC,WAAW,EAAE,QAAM,EAAE,SAAS,CAAC,CAAC;oBACtE,IAAI,GAAG,aAAa,CAAC,gBAAgB,CAAC,WAAW,EAAE,QAAM,EAAE,SAAS,CAAC,CAAC;iBACvE;gBAED,IAAM,aAAa,GACf,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,IAAI,MAAA,EAAE,IAAI,MAAA,EAAC,EAAE,OAAO,EAAE,aAAa,EAAC,CAAC,CAAC;gBAE5D,aAAa,CAAC,WAAW,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;gBACvC,aAAa,CAAC,WAAW,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;;gBAIvC,OAAO,aAAa,CAAC;aACtB;YAED,IAAM,MAAM,GAAG,KAAK,IAAIC,aAAU,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;YACrD,IAAI,CAAC,CAAC,CAAC,KAAK,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,KAAK,QAAQ;gBAC5C,aAAa,CAAC,kBAAkB,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;gBACzC,aAAa,IAAI,IAAI,EAAE;gBACzB,IAAM,KAAK,GAAG,aAAa,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,MAAoB,CAAC;gBACzE,IAAM,KAAK,GAAG,aAAa,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,MAAoB,CAAC;gBACzE,IAAM,YAAY,GAAG,CAAC,CAAC,KAAK,KAAK,QAAQ;;oBAErCP,eAAY,CAAC,sBAAsB,CAAC,KAA4B,CAAC;oBACjE,KAAK,CAAC;gBACV,IAAM,YAAY,GAAG,CAAC,CAAC,KAAK,KAAK,QAAQ;;oBAErCA,eAAY,CAAC,sBAAsB,CAAC,KAA4B,CAAC;oBACjE,KAAK,CAAC;gBACJ,IAAA,mFACiE,EADhE,iBAAS,EAAE,gBACqD,CAAC;gBAExE,OAAO,aAAa,CAAC,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,SAAS,CAAC,CAAC;aAClE;YACD,IAAM,OAAO,GAAG,gBAAgB,CAAC,SAAS,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;YAC9D,OAAO,aAAa,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC;SAChE,CAAC;IACJ;;IClLA;;;;;;;;;;;;;;;;aAsBgB,aAAa,CAAC,IAAgB;QAC5C,IAAM,YAAY,GAAG,IAAI,YAAY,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QACnD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;YACpC,YAAY,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;SACrC;QACD,OAAO,YAAY,CAAC;IACtB;;IC5BA;;;;;;;;;;;;;;;;IAqBA;;;aAGgB,4BAA4B,CAAC,EAAyB;QAEpE,OAAO,UAAC,MAAgB,EAAE,MAAgB,EAAE,KAAiB,EACrD,KAAiB,EAAE,KAAe;YACxC,IAAM,QAAQ,GAAGA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YAEzE,IAAM,UAAU,GAAG,QAAQ,CAAC,MAAM,CAAC;YACnC,IAAM,aAAa,GAAGD,OAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC;YACpD,IAAM,UAAU,GAAGA,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;YAEhD,IAAM,MAAM,GACRA,OAAI,CAAC,sBAAsB,CAAC,KAAwB,EAAE,UAAU,CAAC,CAAC;YAEtE,IAAM,KAAK,GAAG,MAAM,CAAC,MAAM,CAAC;YAC5B,IAAM,KAAK,GAAG,MAAM,CAAC,MAAM,CAAC;YAE5B,IAAM,QAAQ,GAAGA,OAAI,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;YAC7C,IAAM,QAAQ,GAAGA,OAAI,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;YAE7C,IAAM,cAAc,GAAGC,eAAY,CAAC,gBAAgB,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAC;YACvE,IAAM,cAAc,GAAGA,eAAY,CAAC,gBAAgB,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAC;YAEvE,IAAI,cAAc,CAAC,MAAM,GAAG,cAAc,CAAC,MAAM,KAAK,CAAC,EAAE;gBACvD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;oBACtC,MAAM,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,CAAC,GAAG,KAAK,CAAC,MAAM,CAAC,EAAE,KAAK,CAAC,CAAC,GAAG,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC;iBAClE;aACF;iBAAM;wCACI,CAAC;oBACR,IAAM,GAAG,GAAGD,OAAI,CAAC,UAAU,CAAC,CAAC,EAAE,UAAU,EAAE,aAAa,CAAC,CAAC;oBAE1D,IAAM,IAAI,GAAG,GAAG,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC;oBAC/B,cAAc,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,GAAA,CAAC,CAAC;oBACzC,IAAM,MAAM,GAAGA,OAAI,CAAC,UAAU,CAAC,IAAI,EAAE,KAAK,EAAE,QAAQ,CAAC,CAAC;oBAEtD,IAAM,IAAI,GAAG,GAAG,CAAC,KAAK,CAAC,CAAC,KAAK,CAAC,CAAC;oBAC/B,cAAc,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,GAAA,CAAC,CAAC;oBACzC,IAAM,MAAM,GAAGA,OAAI,CAAC,UAAU,CAAC,IAAI,EAAE,KAAK,EAAE,QAAQ,CAAC,CAAC;oBAEtD,MAAM,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC;;gBAX/C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,EAAE,CAAC;4BAA7B,CAAC;iBAYT;aACF;YAED,OAAO,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAC;SAC3B,CAAC;IACJ;;ICpE
A;;;;;;;;;;;;;;;;IAsBO,IAAM,OAAO,GAChB,4BAA4B,EAAE,UAAC,CAAS,EAAE,CAAS,IAAK,OAAA,CAAC,GAAG,CAAC,GAAA,EAAE;;ICvBnE;;;;;;;;;;;;;;;;IAqBA;;;aAGgB,qBAAqB,CAAC,EAAwB;QAE5D,OAAO,UAAC,MAAM,EAAE,KAAK,EAAE,KAAK;YAC1B,IAAM,SAAS,GACXA,OAAI,CAAC,sBAAsB,CAAC,KAAwB,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC;YACzE,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;gBACtC,SAAS,CAAC,CAAC,CAAC,GAAG,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC;aACrC;YACD,OAAO,SAAS,CAAC;SAClB,CAAC;IACJ;;IClCA;;;;;;;;;;;;;;;;IAsBO,IAAM,QAAQ,GAAG,qBAAqB,CAAC,UAAC,EAAE,IAAK,OAAA,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,GAAA,CAAC;;ICtBpE;;;;;;;;;;;;;;;;aAmBgBS,YAAU,CACtB,MAAqD,EAAE,QAAkB,EACzE,KAAe,EAAE,YAAqB;QACxC,IAAM,OAAO,GAAGT,OAAI,CAAC,iBAAiB,CAAC,KAAK,EAAEA,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC,CAAC;QAE5E,IAAI,YAAY,IAAI,KAAK,KAAK,QAAQ,EAAE;;YAEtC,IAAI,QAAM,GAAG,CAAC,CAAC;YACf,MAAM,CAAC,OAAO,CAAC,UAAA,KAAK;gBAClB,IAAM,IAAI,GAAGA,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;gBAE5C,OAAsB,CAAC,GAAG,CAAC,KAAK,CAAC,IAAkB,EAAE,QAAM,CAAC,CAAC;gBAC9D,QAAM,IAAI,IAAI,CAAC;aAChB,CAAC,CAAC;SACJ;aAAM;YACL,IAAI,WAAS,GAAG,CAAC,CAAC;YAElB,MAAM,CAAC,OAAO,CAAC,UAAA,KAAK;gBAClB,IAAM,WAAW,GAAG,KAAK,KAAK,QAAQ;oBAClCC,eAAY,CAAC,sBAAsB,CAAC,KAAK,CAAC,IAAoB,CAAC;oBAC/D,KAAK,CAAC,IAAkB,CAAC;gBAE7B,IAAI,IAAI,GAAG,CAAC,CAAC;gBAEb,KAAK,IAAI,GAAG,GAAG,CAAC,EAAE,GAAG,GAAG,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,EAAE,GAAG,EAAE;oBAC7C,IAAM,MAAM,GAAG,GAAG,GAAG,QAAQ,CAAC,CAAC,CAAC,GAAG,WAAS,CAAC;oBAC7C,KAAK,IAAI,GAAG,GAAG,CAAC,EAAE,GAAG,GAAG,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,EAAE,GAAG,EAAE;wBAC7C,OAAO,CAAC,MAAM,GAAG,GAAG,CAAC,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC,CAAC;qBAC7C;iBACF;gBAED,WAAS,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aAC7B,CAAC,CAAC;SACJ;QAED,OAAO,OAAO,CAAC;IACjB;;ICvDA;;;;;;;;;;;;;;;;IAsBO,IAAM,SAAS,GAClB,4BAA4B,CAAC,UAAC,CAAS,EAAE,CAAS,IAAK,OAAA,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,CAAC;;ICvB7E;;;;;;;;;;;;;;;;IAsBO,IAAM,OAAO,GAAG,qBAAqB,CAAC,UAAC,EAAE,IAAK,OAAA,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAA,CAAC;;ICtBlE;;;;;;;;;;;;;;;;IAsBO,IAAM,SAAS,GAAG,qBAAqB,CAAC,UAAC,EAAE,IAAK,OAAA,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,GAAA,CAAC;;ICtBtE;;;;;;;;;;;;;;;;IAsBO,IAAM,SAAS,GAAG,qBAAqB,CAAC,UAAC,EAAE,IAAK,OAAA,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,GAAA,CAAC;;aCHtD,YAAY,CACxB,WAAuB,EAAE,SAA0B,EAAE,KAAe,EACpE,SAAiB,EAAE,SAAiB,EAAE,SAAiB,EAAE,OAAiB,EAC1E,WAAqB,EAAE,UAAkB;QAC3C,IAAM,MAAM,GAAGS,SAAM,CAAC,CAAC,SAAS,EAAE,SAAS,CAAC,EAAE,KAAK,CAAC,CAAC;QAErD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,EAAE,CAAC,EAAE,EAAE;YAClC,IAAM,KAAK,GAAG,EAAE,CAAC;YACjB,IAAI,YAAY,GAAG,CAAC,CAAC;YACrB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,EAAE,CAAC,EAAE,EAAE;gBAClC,IAAM,GAAG,GAAG,WAAW,CAAC,CAAC,GAAG,SAAS,GAAG,CAAC,CAAC,CAAC;gBAC3C,YAAY,IAAI,GAAG,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC;gBACjC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;aACjB;YACD,IAAI,YAAY,GAAG,CAAC,IAAI,YAAY,IAAI,UAAU,GAAG,SAAS,EAAE;gBAC9D,MAAM,IAAI,KAAK,CACX,sBAAoB,KAAK,6BAAwB,WAAa,CAAC,CAAC;aACrE;YAED,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,EAAE,CAAC,EAAE,EAAE;gBAClC,MAAM,CAAC,MAAM,CAAC,CAAC,GAAG,SAAS,GAAG,CAAC,CAAC,GAC5B,SAAS,CAAC,GAAG,OAAb,SAAS,WAAQ,SAAS,CAAC,UAAU,CAAC,YAAY,GAAG,SAAS,GAAG,CAAC,CAAC,EAAC,CAAC;aAC1E;SACF;QAED,OAAO,MAAyB,CAAC;IACnC;;IC7CA;;;;;;;;;;;;;;;;aAmBgB,YAAY,CACxB,IAAwB,EAAE,UAA8B,EACxD,kBAA4B;QAC9B,IAAM,MAAM,GAAGA,SAAM,CAAC,kBAAkB,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;QACtD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,IAAI,EAAE,EAAE,CAAC,EAAE;YACpC,IAAM,MAAM,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;YAEpC,IAAM,WAAW,GAAa,MAAM,CAAC,KAAK,EAAE,CAAC;YAC7C,IAAM,QAAQ,GAAG,WAAW,CAAC,CAAC,CAAC,CAAC;YAChC,IAAM,UAAU,GAAG,WAAW,
CAAC,CAAC,CAAC,CAAC;YAClC,IAAM,YAAY,GAAG,UAAU,CAAC,UAAU,CAAC,CAAC,QAAQ,EAAE,UAAU,CAAC,CAAC,CAAC;YACnE,WAAW,CAAC,CAAC,CAAC,GAAG,UAAU,CAAC,MAAM,CAAC,YAAY,CAAW,CAAC;YAE3D,IAAM,aAAa,GAAG,IAAI,CAAC,UAAU,CAAC,WAAW,CAAC,CAAC;YAEnD,IAAI,CAAC,IAAI,aAAa,IAAI,aAAa,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE;gBAC5D,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;aAC/C;SACF;QAED,OAAO,MAA4B,CAAC;IACtC;;ICxCA;;;;;;;;;;;;;;;;IAsBO,IAAM,WAAW,GACpB,4BAA4B,CAAC,UAAC,CAAS,EAAE,CAAS,IAAK,OAAA,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,CAAC;;ICvB3E;;;;;;;;;;;;;;;;IAsBO,IAAM,gBAAgB,GACzB,4BAA4B,CAAC,UAAC,CAAS,EAAE,CAAS,IAAK,OAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,CAAC;;ICvB5E;;;;;;;;;;;;;;;;IAsBO,IAAM,QAAQ,GACjB,4BAA4B,CAAC,UAAC,CAAS,EAAE,CAAS,IAAK,OAAA,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,CAAC;;ICvB3E;;;;;;;;;;;;;;;;IAsBO,IAAM,aAAa,GACtB,4BAA4B,CAAC,UAAC,CAAS,EAAE,CAAS,IAAK,OAAA,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,CAAC;;ICvB5E;;;;;;;;;;;;;;;;IAsBO,IAAM,OAAO,GAAG,qBAAqB,CAAC,UAAC,EAAE,IAAK,OAAA,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAA,CAAC;;ICtBlE;;;;;;;;;;;;;;;;aAmBgB,OAAO,CACnB,KAAiB,EAAE,UAAkB,EAAE,QAAkB,EACzD,KAAe;QACjB,IAAM,IAAI,GAAGV,OAAI,CAAC,sBAAsB,CACpC,KAAwB,EAAEA,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC,CAAC;QAE5D,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;YACpC,IAAM,MAAM,GAAG,CAAC,GAAG,UAAU,CAAC;YAC9B,IAAI,GAAG,GAAG,KAAK,CAAC,MAAM,CAAC,CAAC;YACxB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,EAAE,CAAC,EAAE;gBACnC,IAAM,KAAK,GAAG,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;gBAChC,IAAI,MAAM,CAAC,KAAK,CAAC,KAAK,CAAC;oBACnB,KAAK,GAAG,GAAG,EAAE;oBACf,GAAG,GAAG,KAAK,CAAC;iBACb;aACF;YACD,IAAI,CAAC,CAAC,CAAC,GAAG,GAAG,CAAC;SACf;QACD,OAAO,IAAI,CAAC;IACd;;ICtCA;;;;;;;;;;;;;;;;IAsBO,IAAM,WAAW,GAAG,4BAA4B,EAClD,UAAC,MAAM,EAAE,MAAM,IAAK,OAAA,IAAI,CAAC,GAAG,CAAC,MAAgB,EAAE,MAAgB,CAAC,GAAA,EAAE;;ICvBvE;;;;;;;;;;;;;;;;IAsBO,IAAM,WAAW,GAAG,4BAA4B,EAClD,UAAC,MAAM,EAAE,MAAM,IAAK,OAAA,IAAI,CAAC,GAAG,CAAC,MAAgB,EAAE,MAAgB,CAAC,GAAA,EAAE;;ICvBvE;;;;;;;;;;;;;;;;IAqBO,IAAM,YAAY,GAAG,4BAA4B,EACnD,UAAC,MAAc,EAAE,MAAc,IAAK,OAAA,MAAM,GAAG,MAAM,GAAA,EAAE;;aCC1C,OAAO,CAAC,KAAiB,EAAE,MAAgB,EAAE,MAAgB;QAE3E,IAAM,QAAQ,GACVA,OAAI,CAAC,iBAAiB,CAAC,CAAC,CAAoB,EAAE,MAAM,CAAe,CAAC;QACxE,OAAO,YAAY,CAAC,EAAE,EAAE,MAAM,EAAE,QAAQ,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC;IAC3D;;IC5BA;;;;;;;;;;;;;;;;IAsBO,IAAM,YAAY,GACrB,4BAA4B,EAAE,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,EAAE;;ICvB/D;;;;;;;;;;;;;;;;aAoBgB,aAAa,CACzB,KAAiB,EAAE,MAAgB,EAAE,KAAe,EAAE,IAAc,EACpE,QAAkB;QACpB,IAAM,KAAK,GAAG,MAAM,CAAC,MAAM,CAAC;QAC5B,IAAM,KAAK,GAAGA,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;QACzC,IAAM,QAAQ,GAAGA,OAAI,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;QAC7C,IAAM,UAAU,GAAGA,OAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC;QAEjD,IAAM,MAAM,GAAGA,OAAI,CAAC,sBAAsB,CACtC,KAAwB,EAAEA,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC,CAAC;QAE5D,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,KAAK,EAAE,EAAE,CAAC,EAAE;YAC9B,IAAM,GAAG,GAAGA,OAAI,CAAC,UAAU,CAAC,CAAC,EAAE,KAAK,EAAE,QAAQ,CAAC,CAAC;;YAGhD,IAAM,MAAM,GAAa,IAAI,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAC/C,KAAK,IAAI,GAAC,GAAG,CAAC,EAAE,GAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAC,EAAE,EAAE;gBACtC,MAAM,CAAC,GAAC,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC,GAAC,CAAC,CAAC,CAAC;aAC1B;YAED,IAAM,QAAQ,GAAGA,OAAI,CAAC,UAAU,CAAC,MAAM,EAAE,KAAK,EAAE,UAAU,CAAC,CAAC;YAC5D,MAAM,CAAC,QAAQ,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,CAAC;SAC7B;QACD,OAAO,MAAM,CAAC;IAChB;;aCrBgB,QAAQ,CACpB,MAAgB,EAAE,MAAgB,EAAE,KAAiB,EACrD,aAAuB;QAEnB,IAAA,gFAC2D,EAD1D,gBAAQ,EAAE,mBACgD,CAAC;QAClE,IAAM,QAAQ,GAAGQ,aAAU,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;QAC7C,IAAM,OAAO
,GAAGR,OAAI,CAAC,mBAAmB,CACpBA,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,EAAE,QAAQ,CAAe,CAAC;QAC1E,IAAM,UAAU,GAAGA,OAAI,CAAC,aAAa,CAAC,WAAW,CAAC,CAAC;QAEnD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;YACvC,IAAM,MAAM,GAAG,CAAC,GAAG,UAAU,CAAC;YAC9B,IAAI,MAAI,GAAG,CAAC,CAAC;YACb,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,EAAE,CAAC,EAAE;gBACnC,MAAI,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;aAC3B;YACD,OAAO,CAAC,CAAC,CAAC,GAAG,MAAI,CAAC;SACnB;QAED,OAAO,EAAC,OAAO,SAAA,EAAE,QAAQ,UAAA,EAAE,QAAQ,UAAA,EAAC,CAAC;IACvC;;IC5CA;;;;;;;;;;;;;;;;aAmBgB,SAAS,CACrB,KAAa,EAAE,IAAY,EAAE,IAAY,EACzC,KAAwB;QAC1B,IAAM,aAAa,GAAG,KAAK,KAAK,IAAI,CAAC;QACrC,IAAM,2BAA2B,GAAG,KAAK,GAAG,IAAI,IAAI,IAAI,GAAG,CAAC,CAAC;QAC7D,IAAM,2BAA2B,GAAG,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,CAAC,CAAC;QAE7D,IAAI,aAAa,IAAI,2BAA2B;YAC5C,2BAA2B,EAAE;YAC/B,OAAOA,OAAI,CAAC,mBAAmB,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC;SAC3C;QAED,IAAM,WAAW,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,GAAG,KAAK,IAAI,IAAI,CAAC,CAAC,CAAC;QAC/D,IAAM,MAAM,GAAGA,OAAI,CAAC,mBAAmB,CAAC,WAAW,EAAE,KAAK,CAAC,CAAC;QAE5D,IAAI,IAAI,GAAG,KAAK,IAAI,IAAI,KAAK,CAAC,EAAE;;;YAG9B,IAAI,GAAG,CAAC,CAAC,CAAC;SACX;QAED,MAAM,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC;QAClB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACtC,MAAM,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC;SAClC;QACD,OAAO,MAAM,CAAC;IAChB;;IC7CA;;;;;;;;;;;;;;;;IAsBO,IAAM,SAAS,GAAG,qBAAqB,CAAC,UAAC,EAAE,IAAK,OAAA,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,GAAA,CAAC;;aCAzD,SAAS,CACrB,IAAmB,EAAE,KAAe,EAAE,IAAc,EAAE,KAAe,EACrE,KAAe;QACjB,IAAM,WAAW,GAAGW,aAAU,CAAC,gBAAgB,CAAC,KAAK,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;QACpE,IAAM,MAAM,GAAGX,OAAI,CAAC,aAAa,CAAC,IAAI,CAAC,CAAC;QACxC,IAAM,QAAQ,GAAGA,OAAI,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC;QAE5C,IAAI,WAAW,EAAE;YACf,IAAM,UAAU,GAAGW,aAAU,CAAC,iBAAiB,CAAC,KAAK,EAAE,QAAQ,CAAC,CAAC;YAEjE,IAAI,KAAK,KAAK,QAAQ,EAAE;gBACtB,OAAQ,IAAqB,CAAC,KAAK,CAAC,UAAU,EAAE,UAAU,GAAG,MAAM,CAAC,CAAC;aACtE;YAED,OAAQ,IAAmB,CAAC,QAAQ,CAAC,UAAU,EAAE,UAAU,GAAG,MAAM,CAAC,CAAC;SACvE;QAED,IAAM,WAAW,GAAG,KAAK,KAAK,QAAQ;YAClCV,eAAY,CAAC,sBAAsB,CAAC,IAAoB,CAAC;YACzD,IAAkB,CAAC;QAEvB,IAAM,KAAK,GAAGS,SAAM,CAAC,KAAK,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;QAChD,IAAM,MAAM,GAAGA,SAAM,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC;QACnC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,IAAI,EAAE,EAAE,CAAC,EAAE;YACpC,IAAM,MAAM,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;YACpC,IAAM,KAAK,GAAG,MAAM,CAAC,GAAG,CAAC,UAAC,GAAW,EAAE,CAAC,IAAK,OAAA,GAAG,GAAG,KAAK,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC;YAC7D,MAAM,CAAC,GAAG,OAAV,MAAM,YAAK,KAAK,CAAC,GAAG,OAAT,KAAK,WAAQ,KAAK,KAAM,MAAM,GAAE;SAC5C;QAED,IAAI,KAAK,KAAK,QAAQ,EAAE;YACtB,OAAOT,eAAY,CAAC,sBAAsB,CAAC,MAAM,CAAC,MAAkB,CAAC,CAAC;SACvE;QACD,OAAO,MAAM,CAAC,MAAoB,CAAC;IACrC;;aCpCgB,gBAAgB,CAC5B,QAAkB,EAAE,IAAqB,EAAE,OAAiB,EAC5D,KAAe;QACjB,IAAM,MAAM,GAAGS,SAAM,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;QAE5C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,IAAI,EAAE,CAAC,EAAE,EAAE;YACpC,IAAM,GAAG,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;YAEjC,IAAM,MAAM,GAAa,IAAI,KAAK,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAC/C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBACtC,MAAM,CAAC,CAAC,CAAC,GAAG,GAAG,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,CAAC;aAC5C;YACD,MAAM,CAAC,GAAG,OAAV,MAAM,YAAK,IAAI,CAAC,GAAG,OAAR,IAAI,WAAQ,MAAM,KAAM,GAAG,GAAE;SACzC;QAED,OAAO,MAAyB,CAAC;IACnC;;ICnCA;;;;;;;;;;;;;;;;IAmBA;;;;;;IAMA;QAQE,wBACI,SAAiB,EAAE,WAAqB,EAAE,OAAe,EACzD,QAAgB,EAAE,QAAgB,EAAE,sBAA+B;YACrE,IAAI,CAAC,SAAS,GAAGV,OAAI,CAAC,YAAY,CAAC,SAAS,CAAC,CAAC;YAC9C,IAAI,CAAC,WAA
W,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,OAAO,GAAGA,OAAI,CAAC,YAAY,CAAC,OAAO,CAAC,CAAC;YAC1C,IAAI,CAAC,QAAQ,GAAGA,OAAI,CAAC,YAAY,CAAC,QAAQ,CAAC,CAAC;YAC5C,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,aAAa,GAAG,sBAAsB,CAAC;SAC7C;QAEO,oCAAW,GAAX,UAAY,UAAkB;;;;YAIpC,OAAO,IAAI,CAAC,GAAG,CACX,IAAI,CAAC,QAAQ,GAAG,CAAC,GAAG,UAAU,GAAG,CAAC,GAAG,IAAI,CAAC,QAAQ,EAAE,UAAU,GAAG,CAAC,CAAC,CAAC;SACzE;QAEO,qCAAY,GAAZ,UAAa,MAAc,EAAE,UAAkB;YACrD,IAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,CAAC;YAC9C,OAAO,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,GAAG,CAAC,GAAG,QAAQ,IAAI,UAAU,IAAI,CAAC,CAAC,CAAC;SAChE;QAEO,qCAAY,GAAZ,UACJ,IAAkB,EAAE,UAAkB,EAAE,MAAoB,EAC5D,gBAAwB,EAAE,SAAiB,EAAE,UAAkB;oCACxD,UAAU;gBACjB,IAAM,QAAQ,GAAG,OAAK,WAAW,CAAC,UAAU,CAAC,CAAC;gBAC9C,IAAM,WAAW,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,GAAG,UAAU,CAAC,CAAC;gBACvD,IAAM,YAAY,GACd,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,IAAI,SAAS,IAAI,UAAU,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;gBAC3D,IAAM,SAAS,GAAG,UAAU,IAAI,WAAW,GAAG,YAAY,CAAC,CAAC;gBAC5D,IAAM,cAAc,GAChB,UAAU,IAAI,WAAW,GAAG,CAAC,GAAG,CAAC,GAAG,UAAU,GAAG,QAAQ,CAAC,CAAC;;;gBAI/D,IAAI,SAAS,GAAG,CAAC,CAAC;;gBAElB,SAAS,IAAI,WAAW,GAAG,OAAK,OAAO,CAAC,MAAM,CAAC;;gBAE/C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,EAAE,EAAE,CAAC,EAAE;oBAClC,SAAS,IAAI,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC;iBAC9C;;gBAED,SAAS,IAAI,YAAY,GAAG,OAAK,QAAQ,CAAC,MAAM,CAAC;;gBAEjD,IAAM,aAAa,GAAG,WAAW,GAAG,YAAY,GAAG,SAAS,GAAG,CAAC,CAAC;gBACjE,SAAS,IAAI,aAAa,GAAG,OAAK,SAAS,CAAC,MAAM,CAAC;;gBAGnD,MAAM,CAAC,gBAAgB,GAAG,UAAU,CAAC,GAAG,IAAI,UAAU,CAAC,SAAS,CAAC,CAAC;gBAClE,IAAM,KAAK,GAAG,MAAM,CAAC,gBAAgB,GAAG,UAAU,CAAC,CAAC;gBAEpD,IAAI,cAAc,GAAG,CAAC,CAAC;gBACvB,IAAM,aAAa,GAAG,UAAC,GAAe,IAClC,OAAA,GAAG,CAAC,OAAO,CAAC,UAAC,KAAK,IAAK,OAAA,KAAK,CAAC,cAAc,EAAE,CAAC,GAAG,KAAK,GAAA,CAAC,GAAA,CAAC;gBAE5D,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,EAAE,EAAE,CAAC,EAAE;oBACpC,aAAa,CAAC,OAAK,OAAO,CAAC,CAAC;oBAC5B,aAAa,CAAC,OAAK,SAAS,CAAC,CAAC;iBAC/B;;gBAED,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,GAAG,CAAC,EAAE,EAAE,CAAC,EAAE;oBACtC,aAAa,CAAC,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC,CAAC,CAAC;oBACxC,aAAa,CAAC,OAAK,SAAS,CAAC,CAAC;iBAC/B;;;gBAGD,IAAI,SAAS,GAAG,CAAC,EAAE;;;;oBAIjB,aAAa,CAAC,IAAI,CAAC,cAAc,GAAG,SAAS,GAAG,CAAC,CAAC,CAAC,CAAC;oBACpD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,YAAY,EAAE,EAAE,CAAC,EAAE;wBACrC,aAAa,CAAC,OAAK,SAAS,CAAC,CAAC;wBAC9B,aAAa,CAAC,OAAK,QAAQ,CAAC,CAAC;qBAC9B;iBACF;qBAAM;;;;;oBAKL,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,YAAY,GAAG,CAAC,EAAE,EAAE,CAAC,EAAE;wBACzC,aAAa,CAAC,OAAK,QAAQ,CAAC,CAAC;wBAC7B,aAAa,CAAC,OAAK,SAAS,CAAC,CAAC;qBAC/B;oBACD,aAAa,CAAC,OAAK,QAAQ,CAAC,CAAC;iBAC9B;;;YA9DH,KAAK,IAAI,UAAU,GAAG,CAAC,EAAE,UAAU,GAAG,SAAS,EAAE,EAAE,UAAU;wBAApD,UAAU;aA+DlB;SACF;;;;QAKM,gCAAO,GAAP,UAAQ,IAAkB,EAAE,MAAkB;YAA9C,iBAoFN;;;YAhFC,IAAM,aAAa,GAAG,IAAI,CAAC,MAAM,CAAC;YAClC,IAAM,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;YACjC,IAAI,UAAU,GAAG,CAAC,EAAE;gBAClB,IAAI,SAAS,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;gBAC1B,IAAI,SAAS,KAAK,CAAC,EAAE;oBACnB,MAAM,IAAI,KAAK,CAAC,sCAAoC,SAAW,CAAC,CAAC;iBAClE;gBACD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,EAAE,CAAC,EAAE;oBACnC,IAAI,WAAW,GAAG,MAAM,CAAC,CAAC,CAAC,IAAI,SAAS,CAAC;oBACzC,WAAW,GAAG,WAAW,KAAK,MAAM,CAAC,CAAC,CAAC,IAAI,aAAa,CAAC,CAAC;oBAC1D,IAAI,CAAC,WAAW,EAAE;wBAChB,MAAM,IAAI,KAAK,CAAC,yBAAuB,MAAM,CAAC,CAAC,CAAC,sBAC5C,SAAS,UAAK,aAAa,MAAG,CAAC,CAAC;qBACrC;oBACD,SAAS,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;iBACvB;gBACD,IAAI,SAAS,KAAK,aAAa,EAAE;oBAC/B,MAAM,IAAI,KAAK,CAAC,kDACZ,aAAa,cAAS,SAAW,CAAC,CAAC;iBACxC;aACF;YAED,IAAM,aAAa,GAAG,UAAU,GAAG,CAAC,CAAC;YACrC,IAAM,YAAY,GAAGA,OAAI,CAAC,iBAAiB,CAAC,OAAO,EAAE,UAAU,CAAC,CAAC;;YA
EjE,IAAI,aAAa,KAAK,CAAC,IAAI,UAAU,KAAK,CAAC,EAAE;gBAC3C,IAAM,KAAK,GAAiB,IAAI,KAAK,CAAC,aAAa,CAAC,CAAC;gBACrD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,aAAa,EAAE,EAAE,CAAC,EAAE;oBACvC,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;iBACrB;gBACD,OAAO,CAAC,KAAK,EAAE,YAAY,CAAC,CAAC;aAC9B;YAED,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;oCACX,CAAC;gBACR,IAAM,MAAM,GAAG,MAAM,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;gBACzC,IAAI,SAAS,GAAG,CAAC,CAAC;gBAClB,OAAK,WAAW,CAAC,OAAO,CAAC,UAAC,UAAU;oBAClC,SAAS,IAAI,KAAI,CAAC,YAAY,CAAC,MAAM,EAAE,UAAU,CAAC,CAAC;iBACpD,CAAC,CAAC;gBACH,IAAI,OAAK,aAAa,IAAI,MAAM,GAAG,CAAC,IAAI,SAAS,KAAK,CAAC,EAAE;oBACvD,SAAS,GAAG,CAAC,CAAC;iBACf;gBACD,YAAY,CAAC,CAAC,CAAC,GAAG,YAAY,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,SAAS,CAAC;;;YATpD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,aAAa,EAAE,EAAE,CAAC;wBAA9B,CAAC;aAUT;YAED,IAAM,MAAM,GAAiB,IAAI,KAAK,CAAC,YAAY,CAAC,aAAa,CAAC,CAAC,CAAC;oCAE3D,CAAC;gBACR,IAAM,UAAU,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;gBAC7B,IAAI,cAAc,GAAG,YAAY,CAAC,CAAC,CAAC,CAAC;gBACrC,OAAK,WAAW,CAAC,OAAO,CAAC,UAAC,UAAU;oBAClC,IAAM,MAAM,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;oBACzC,IAAM,SAAS,GAAG,KAAI,CAAC,YAAY,CAAC,MAAM,EAAE,UAAU,CAAC,CAAC;oBACxD,KAAI,CAAC,YAAY,CACb,IAAI,EAAE,UAAU,EAAE,MAAM,EAAE,cAAc,EAAE,SAAS,EAAE,UAAU,CAAC,CAAC;oBACrE,cAAc,IAAI,SAAS,CAAC;iBAC7B,CAAC,CAAC;;;;;;gBAMH,IAAI,OAAK,aAAa,IAAI,cAAc,KAAK,YAAY,CAAC,CAAC,CAAC,EAAE;oBAC5D,IAAM,UAAU,GAAG,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;;;oBAG7C,IAAI,UAAU,KAAK,CAAC,EAAE;;qBAErB;;;;oBAID,IAAM,UAAU,GAAG,UAAU,GAAG,CAAC,GAAG,OAAK,QAAQ,CAAC;oBAClD,IAAM,SAAS,GAAG,CAAC,CAAC;oBACpB,OAAK,YAAY,CACb,IAAI,EAAE,UAAU,EAAE,MAAM,EAAE,cAAc,EAAE,SAAS,EAAE,UAAU,CAAC,CAAC;iBACtE;;;YA7BH,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,EAAE,EAAE,CAAC;wBAA7B,CAAC;aA8BT;YACD,OAAO,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC;SAC/B;6BACF;KAAA,IAAA;aAEe,gBAAgB,CAC5B,IAAkB,EAAE,UAAsB,EAAE,SAAiB,EAC7D,WAAqB,EAAE,OAAe,EAAE,QAAgB,EAAE,QAAgB,EAC1E,sBAA+B;QACjC,OAAO,IAAI,cAAc,CACd,SAAS,EAAE,WAAW,EAAE,OAAO,EAAE,QAAQ,EAAE,QAAQ,EACnD,sBAAsB,CAAC;aAC7B,OAAO,CAAC,IAAI,EAAE,UAAU,CAAC,CAAC;IACjC;;IChOA;;;;;;;;;;;;;;;;IAsBO,IAAM,OAAO,GAAG,4BAA4B,EAC9C,UAAC,MAAc,EAAE,MAAc,IAAK,OAAA,MAAM,GAAG,MAAM,GAAA,EAAE;;ICvB1D;;;;;;;;;;;;;;;;IAmBA;;;;aAKgB,QAAQ,CACpB,IAA+B,EAC/B,IAAc;QAChB,IAAM,QAAQ,GAAa,IAAI,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QAChD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACxC,QAAQ,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;SACvC;QACD,IAAM,MAAM,GAAGU,SAAM,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;QAC5C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;YAC7C,IAAM,MAAM,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;YAEpC,IAAM,WAAW,GAAa,IAAI,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YACnD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBAC3C,WAAW,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aAC5C;YAED,IAAM,aAAa,GAAG,IAAI,CAAC,UAAU,CAAC,WAAW,CAAC,CAAC;YAEnD,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;SAC/C;QACD,OAAO,MAAmC,CAAC;IAC7C;;ICnBA,IAAM,WAAW,GAAG,UAAC,CAAO,EAAE,CAAO;QACnC,IAAM,SAAS,GAAG,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC;QACpC,OAAO,SAAS,KAAK,CAAC,GAAG,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,KAAK,GAAG,SAAS,CAAC;IACzD,CAAC,CAAC;IAEF;;;;;;;;;;;IAWA,SAASE,QAAM,CAAC,KAAa,EAAE,CAAS,EAAE,IAAQ,EAAE,KAAwB;QAAlC,qBAAA,EAAA,QAAQ;QAAE,sBAAA,EAAA,QAAQ,KAAK,CAAC,MAAM,GAAG,CAAC;QAC1E,OAAO,KAAK,GAAG,IAAI,EAAE;;;;YAInB,IAAI,KAAK,GAAG,IAAI,GAAG,GAAG,EAAE;gBACtB,IAAM,CAAC,GAAG,KAAK,GAAG,
IAAI,GAAG,CAAC,CAAC;gBAC3B,IAAM,GAAC,GAAG,CAAC,GAAG,IAAI,GAAG,CAAC,CAAC;gBACvB,IAAM,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;gBACtB,IAAM,CAAC,GAAG,GAAG,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;gBACpC,IAAM,EAAE,GAAG,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,GAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;gBACvE,IAAM,OAAO,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,GAAC,GAAG,CAAC,GAAG,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC;gBAC/D,IAAM,QAAQ,GAAG,IAAI,CAAC,GAAG,CAAC,KAAK,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,GAAC,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC;gBACvEA,QAAM,CAAC,KAAK,EAAE,CAAC,EAAE,OAAO,EAAE,QAAQ,CAAC,CAAC;aACrC;;YAED,IAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,CAAC;YACnB,IAAI,CAAC,GAAG,IAAI,CAAC;YACb,IAAI,CAAC,GAAG,KAAK,CAAC;YAEdZ,OAAI,CAAC,IAAI,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC;YAE1B,IAAI,WAAW,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE;gBACpCA,OAAI,CAAC,IAAI,CAAC,KAAK,EAAE,IAAI,EAAE,KAAK,CAAC,CAAC;aAC/B;YACD,OAAO,CAAC,GAAG,CAAC,EAAE;gBACZA,OAAI,CAAC,IAAI,CAAC,KAAK,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;gBACvB,CAAC,EAAE,CAAC;gBACJ,CAAC,EAAE,CAAC;gBACJ,OAAO,WAAW,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE;oBACnC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;iBACX;gBACD,OAAO,WAAW,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE;oBACnC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;iBACX;aACF;YACD,IAAI,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,EAAE;gBACrCA,OAAI,CAAC,IAAI,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC;aAC3B;iBAAM;gBACL,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;gBACVA,OAAI,CAAC,IAAI,CAAC,KAAK,EAAE,CAAC,EAAE,KAAK,CAAC,CAAC;aAC5B;;;YAGD,IAAI,CAAC,IAAI,CAAC,EAAE;gBACV,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC;aACd;YACD,IAAI,CAAC,IAAI,CAAC,EAAE;gBACV,KAAK,GAAG,CAAC,GAAG,CAAC,CAAC;aACf;SACF;IACH,CAAC;aAEe,QAAQ,CACpB,CAAa,EAAE,MAAgB,EAAE,MAAuB,EAAE,CAAS,EACnE,MAAe;;QAGjB,IAAM,OAAO,GAAG,MAAM,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;QACpC,IAAA,6CAA6C,EAA5C,aAAK,EAAE,YAAqC,CAAC;QACpD,IAAM,WAAW,GAAGA,OAAI,CAAC,sBAAsB,CAAC,MAAM,EAAE,KAAK,GAAG,CAAC,CAAC,CAAC;QACnE,IAAM,cAAc,GAAGA,OAAI,CAAC,sBAAsB,CAAC,OAAO,EAAE,KAAK,GAAG,CAAC,CAAC,CAAC;gCAE9D,CAAC;YACR,IAAM,MAAM,GAAG,CAAC,GAAG,IAAI,CAAC;YACxB,IAAM,IAAI,GAAG,CAAC,CAAC,QAAQ,CAAC,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC,CAAC;YAE/C,IAAI,SAAS,GAAW,IAAI,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAC/C,IAAI,CAAC,OAAO,CACR,UAAC,KAAa,EAAE,KAAa,IAAK,OAAA,SAAS,CAAC,KAAK,CAAC,GAAG,EAAC,KAAK,OAAA,EAAE,KAAK,OAAA,EAAC,GAAA,CAAC,CAAC;YAEzE,IAAI,CAAC,GAAG,SAAS,CAAC,MAAM,EAAE;gBACxBY,QAAM,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC;gBACrB,SAAS,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;aACnC;YAED,IAAI,MAAM,EAAE;gBACV,SAAS,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;aAC7B;YAED,IAAM,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC;YACxB,IAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC,SAAS,EAAE,SAAS,GAAG,CAAC,CAAC,CAAC;YAChE,IAAM,WAAW,GAAG,cAAc,CAAC,QAAQ,CAAC,SAAS,EAAE,SAAS,GAAG,CAAC,CAAC,CAAC;YACtE,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,EAAE;gBAC1B,QAAQ,CAAC,CAAC,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;gBACjC,WAAW,CAAC,CAAC,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;aACrC;;QAvBH,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,KAAK,EAAE,CAAC,EAAE;oBAArB,CAAC;SAwBT;;;QAGD,IAAM,WAAW,GAAG,MAAM,CAAC,KAAK,EAAE,CAAC;QACnC,WAAW,CAAC,WAAW,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC;QAExC,OAAO;YACLF,SAAM,CAAC,WAA0B,EAAE,MAAM,EAAE,WAAW,CAAC;YACvDA,SAAM,CAAC,WAA0B,EAAE,OAAO,EAAE,cAAc,CAAC;SAC5D,CAAC;IACJ;;IC3IA;;;;;;;;;;;;;;;;QA6BE,oBAAmB,EACnB,sBAAqB,EACrB,4BAAyB,EACzB,wBAAuB,EACvB,oBAAmB,EACnB,wBAAuB,EACvB,wBAAuB,EACvB,8BAA6B,EAC7B,8BAA6B,EAC7B,sCAA
qC,EACrC,4BAA2B,EAC3B,gCAA+B,EAC/B,sBAAqB,EACrB,oBAAmB,EACnB,oBAAmB,EACnB,4BAA2B,EAC3B,4BAA2B,EAC3B,8BAA6B,EAC7B,oBAAmB,EACnB,8BAA6B,EAC7B,sBAAqB,EACrB,wBAAuB,EACvB,wBAAuB,EACvB,gCAA+B,EAC/B,wBAAuB,EACvB,sCAAqC,EACrC,sCAAqC,EACrC,oBAAmB,EACnB,sBAAqB,EACrB,sBAAqB,EACrB,gCAA+B;;IC3DjC;;;;;;;;;;;;;;;;IAsBO,IAAM,GAAG,GACZ,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,GAAG,EAAE,aAAa,EAAE,gBAAgB,EAAC,CAAC,CAAC;IAEzE,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEG,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IC7BD;;;;;;;;;;;;;;;;IAsBO,IAAM,aAAa,GAAG,gBAAgB,CAAC;QAC5C,SAAS,EAAE,YAAY,CAAC,GAAG;QAC3B,aAAa,EAAEC,UAAM;QACrB,eAAe,EAAE,IAAI;KACtB,CAAC,CAAC;IAEI,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,aAAa;KAC1B;;IChCD;;;;;;;;;;;;;;;;IAsBA;QAUE,2BAAY,MAAkB;YAJ9B,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;YAC7B,IAAI,CAAC,aAAa,GAAG,MAAM,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,MAAI,CAAG,GAAA,CAAC,CAAC;YACnD,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAChC,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC;SACzB;QAED,uCAAW,GAAX;YACE,IAAM,QAAQ,GAAa,EAAE,CAAC;;YAE9B,IAAI,CAAC,aAAa,CAAC,OAAO,CAAC,UAAA,QAAQ;gBACjC,QAAQ,CAAC,IAAI,CACT,UAAQ,QAAQ,cAAS,QAAQ,4BAAyB,CAAC,CAAC;aACjE,CAAC,CAAC;;YAEH,IAAM,SAAS,GAAG,IAAI,CAAC,aAAa;iBACb,GAAG,CAAC,UAAA,QAAQ;gBACX,OAAO,MAAI,QAAU,CAAC;aACvB,CAAC;iBACD,IAAI,CAAC,KAAK,CAAC,CAAC;YAEnC,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,sCACb,IAAI,CAAC,aAAa,0DACZ,IAAI,CAAC,aAAa,gIAGxC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,kDACC,SAAS,8CAI9C,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;gCACF;KAAA;;ICtED;;;;;;;;;;;;;;;;aAwBgB,IAAI,CAAC,IAAkD;QAE9D,IAAA,oBAAM,EAAE,sBAAO,CAAS;QAE/B,IAAM,OAAO,GAAG,MAAM,CAAC;QACvB,IAAI,OAAO,CAAC,MAAM,KAAK,CAAC,EAAE;YACxB,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,CAAC,CAAC,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;SACrD;QAED,IAAM,KAAK,GACP,OAAO,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,CAAC,MAAM,CAAC,UAAC,EAAE,EAAE,EAAE,IAAK,OAAAP,aAAU,CAAC,EAAE,EAAE,EAAE,CAAC,GAAA,CAAC,CAAC;QACrE,IAAM,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,CAAC;QACzC,IAAM,OAAO,GAAG,IAAI,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC9C,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC;IAC3D,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEQ,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICrBD;QAaE,0BAAY,UAAoB,EAAE,IAAY,EAAE,UAAuB;YARvE,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,sBAAsB,CAAC;YAIlC,SAAI,GAAG,IAAI,CAAC;YAGV,IAAM,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC;YACpBf,eAAY,CAAC,0BAA0B,CACnC,KAAK,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,IAAI,EACtE,UAAU,CAAC,MAAM,CAAC,CAAC;YAEvB,IAAI,CAAC,EAAE,GAAG,UAAU,KAAK,KAAK,GAAG,GAAG,GAAG,GAAG,CAAC;;YAGrC,IAAA,2EACsD,EADrD,mBACqD,CAAC;YAE7D,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC,MAAM,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,WAAW,CAAC;YAEhE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;;;YAG3D,IAAI,CAAC,QAAQ;gBACT,eAAe,CAAC,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEtE,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,SAAS,GAAG,cAAY,IAAI,CAAC,EAAI,CAAC;SACxC;QAED,sCAAW,GAAX;YAAA,iBA4EC;YA3EC,IAAM,mBAAmB,GAAG,sDACiB,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,0DACtB,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,aAChE,CAAC;YAEF,IAAM,oBAAoB,GAAG;gBAC3B,IAAI,
KAAI,CAAC,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;oBAChC,OAAO,iBAAiB,CAAC;iBAC1B;qBAAM;oBACL,OAAO,qBAAmB,YAAY,CAAC,KAAI,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,CAAG,CAAC;iBACtE;aACF,CAAC;YAEF,IAAM,iBAAiB,GAAG;gBACxB,IAAI,OAAO,GAAG,EAAE,CAAC;gBACjB,IAAI,KAAI,CAAC,WAAW,CAAC,MAAM,KAAK,CAAC,EAAE;oBACjC,IAAI,KAAI,CAAC,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;wBAChC,OAAO,IAAI,eAAe,CAAC;qBAC5B;iBACF;qBAAM;oBACL,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,KAAI,CAAC,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;wBAChD,OAAO,IAAI,kBAAgB,YAAY,CAAC,CAAC,CAAC,MAAG,CAAC;qBAC/C;iBACF;gBACD,OAAO,OAAO,CAAC;aAChB,CAAC;YAEF,IAAM,QAAQ,GAAG,2GAKb,mBAAmB,kBAEnB,iCAAiC,EAAE,6FAEd,oBAAoB,EAAE,sUAOlB,iBAAiB,EAAE,2DACL,IAAI,CAAC,EAAE,mlBAe1B,IAAI,CAAC,EAAE,icAc9B,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;+BACF;KAAA;;ICzID;;;;;;;;;;;;;;;;IAqBA;QASE,gCAAY,MAAgB,EAAE,MAAgB;YAR9C,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;;YAMtB,kBAAa,GAA6B,CAAC,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;YAGpD,IAAM,WAAW,GAAa,IAAI,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YACvD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBAC3C,WAAW,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;aACpC;YACD,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YACvC,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAE1E,IAAI,CAAC,SAAS,GAAG,iBAAiB,CAAC;SACpC;QAED,4CAAW,GAAX;YACE,IAAM,QAAQ,GAAG,4BACE,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,yDACG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,CAAC,YAChE,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,kBACrB,sBAAsB,EAAE,kwBAmB3B,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;qCACF;KAAA;;ICtED;;;;;;;;;;;;;;;;IAqBA;QAWE,0BAAY,MAAgB,EAAE,MAAgB;YAV9C,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YAKtB,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAM,WAAW,GAAa,IAAI,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YACvD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBAC3C,WAAW,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;aACpC;YACD,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEhC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,IAAI,CAAC,SAAS,GAAG,eAAa,MAAQ,CAAC;SACxC;QAED,sCAAW,GAAX;YACE,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;YACzD,IAAM,QAAQ,GAAG,iBAAiB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAEhD,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,uCAEd,IAAI,CAAC,aAAa,0DACX,IAAI,CAAC,aAAa,8KAI9C,IAAI,CAAC,WAAW,CAAC,MAAM,0BACf,KAAK,SAAI,QAAQ,kEAI5B,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;+BACF;KAAA,IAAA;IAED,SAAS,iBAAiB,CAAC,MAAgB;QACzC,IAAM,IAAI,GAAG,MAAM,CAAC,MAAM,CAAC;QAC3B,IAAI,IAAI,GAAG,CAAC,EAAE;YACZ,MAAM,KAAK,CAAC,wBAAsB,IAAI,0BAAuB,CAAC,CAAC;SAChE;QACD,IAAM,cAAc,GAAG,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC;QACvC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACtC,cAAc,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,GAAG,WAAS,YAAY,CAAC,CAAC,CAAG,CAAC;SACxD;QAED,OAAO,cAAc,CAAC,IAAI,EAAE,CAAC;IAC/B;;IChFA;;;;;;;;;;;;;;;;aAyBgB,SAAS,CAAC,IAIzB;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,CAAU;QACrB,IAAM,aAAa,GAAG,OAAO,CAAC;QAE9B,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAC7B,IAAM,QAAQ,GAAa,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC;QAC5C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACxC,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,
CAAC,CAAC,CAAC,CAAC;SAChC;QACD,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;YACnC,IAAM,KAAK,GAAG,aAAa,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YACpD,IAAM,MAAM,GAAG,KAAK,CAAC,MAAoB,CAAC;YAC1C,IAAM,SAAS,GAAGgB,gBAAY,CAAC,MAAM,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,IAAI,EAAE,QAAQ,CAAC,CAAC;YACzE,OAAO,OAAO,CAAC,cAAc,CAAC,QAAQ,EAAE,CAAC,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;SAC7D;QACD,IAAI,CAAC,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,IAAIjB,OAAI,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE;YAC1D,IAAM,SAAO,GAAG,IAAI,sBAAsB,CAAC,CAAC,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC;YAC1D,OAAO,aAAa,CAAC,gBAAgB,CAAC,SAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;SAC9D;QACD,IAAM,OAAO,GAAG,IAAI,gBAAgB,CAAC,CAAC,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC;QACpD,OAAO,aAAa,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;IAC/D,CAAC;IAEM,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEkB,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,SAA6B;KAC1C;;IC1DD;;;;;;;;;;;;;;;;aAwBgB,MAAM,CAClB,IAAwE;QAEnE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,CAAU;QAErB,IAAI,IAAI,GAAGlB,OAAI,CAAC,cAAc,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;QAC9C,IAAM,YAAY,GAAGC,eAAY,CAAC,kBAAkB,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAC3E,IAAI,EAAE,GAAG,CAAC,CAAC;QACX,IAAM,uBAAuB,GAAG,EAAE,CAAC;QACnC,IAAI,YAAY,IAAI,IAAI,EAAE;YACxB,EAAE,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,YAAY,EAAC,EAAC,CAAC,CAAC;YACpE,uBAAuB,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;YACjC,IAAI,GAAGA,eAAY,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;SACpE;QAEDA,eAAY,CAAC,0BAA0B,CAAC,QAAQ,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAC9E,IAAM,OAAO,GAAG,IAAI,gBAAgB,CAAC,EAAE,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC;QAC/D,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,MAAM,CAAC,iBAAiB,CAAC,EAAC,CAAC,CAAC;QAC1E,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,EAAE,OAAO,EAAE,WAAW,CAAC,CAAC;QAC1E,uBAAuB,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QACpE,OAAO,GAAG,CAAC;IACb,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEkB,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;ICrDD;;;;;;;;;;;;;;;;aAwBgB,MAAM,CAClB,IAAwE;QAEnE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,CAAU;QAErB,IAAI,IAAI,GAAGnB,OAAI,CAAC,cAAc,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;QAC9C,IAAM,YAAY,GAAGC,eAAY,CAAC,kBAAkB,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAC3E,IAAI,EAAE,GAAG,CAAC,CAAC;QACX,IAAM,uBAAuB,GAAG,EAAE,CAAC;QACnC,IAAI,YAAY,IAAI,IAAI,EAAE;YACxB,EAAE,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,YAAY,EAAC,EAAC,CAAC,CAAC;YACpE,uBAAuB,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;YACjC,IAAI,GAAGA,eAAY,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;SACpE;QAEDA,eAAY,CAAC,0BAA0B,CAAC,QAAQ,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAC9E,IAAM,OAAO,GAAG,IAAI,gBAAgB,CAAC,EAAE,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC;QAC/D,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,MAAM,CAAC,iBAAiB,CAAC,EAAC,CAAC,CAAC;QAC1E,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,EAAE,CAAC,EAAE,OAAO,EAAE,WAAW,CAAC,CAAC;QAC1E,uBAAuB,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QACpE,OAAO,GAAG,CAAC;IACb,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEmB,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;ICrDD;;;;;;;
;;;;;;;;;IAuBA;QAcE,uBAAY,QAAiC,EAAE,QAAqB;YATpE,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GACJ,0GAA0G,CAAC;;;YAG/G,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAEtD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YAErC,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAE3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,YAAU,QAAU,CAAC;YACtC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;SAC1B;QAED,mCAAW,GAAX;YACE,IAAI,aAAa,GAAG,wCAAwC,CAAC;YAC7D,IAAI,IAAI,CAAC,QAAQ,KAAK,KAAK,EAAE;gBAC3B,aAAa,GAAG,yDAAyD,CAAC;aAC3E;YAED,IAAI,WAAW,GAAG,aAAa,CAAC;YAChC,IAAI,IAAI,CAAC,QAAQ,KAAK,KAAK,EAAE;gBAC3B,WAAW,GAAG,qBAAqB,CAAC;aACrC;YAED,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,gUASnC,IAAI,CAAC,QAAQ,KAAK,KAAK,GAAG,KAAK,GAAG,yBAAyB,+jBAiBnD,aAAa,0EAIO,WAAW,iCAG1C,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA;;ICjGD;;;;;;;;;;;;;;;;IAuBA;QAUE,4CAAY,QAAiC;YAL7C,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,qBAAqB,CAAC;YACjC,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACtD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YACrC,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAE3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,6BAA6B,CAAC;SAChD;QAED,wDAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,waActC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;iDACF;KAAA;;IC9DD;;;;;;;;;;;;;;;;aAwBgB,OAAO,CACnB,IAA0E;QAErE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,6BAAU,EAAE,uBAAO,EAAE,eAAG,EAAE,uCAAe,CAAU;QAC1D,IAAM,SAAS,GAAG,CAAC,CAAC;QACpB,IAAM,QAAQ,GAAGnB,eAAY,CAAC,iBAAiB,CAC3C,CAAC,CAAC,KAAyC,EAAE,UAAU,EAAE,OAAO,EAChE,SAAS,EAAE,GAAG,EAAE,eAAe,CAAC,CAAC;QACrC,IAAI,QAAQ,CAAC,WAAW,KAAK,CAAC,IAAI,QAAQ,CAAC,YAAY,KAAK,CAAC;YACzDD,OAAI,CAAC,WAAW,CAAC,QAAQ,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,CAAC,EAAE;YACzD,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;SACzC;QAED,IAAI,OAAyD,CAAC;QAC9D,IAAM,UAAU,GACZ,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC,CAAC,CAAC;QAC3E,IAAI,QAAQ,CAAC,YAAY,KAAK,CAAC,IAAI,QAAQ,CAAC,WAAW,KAAK,CAAC,EAAE;YAC7D,OAAO,GAAG,IAAI,kCAAkC,CAAC,QAAQ,CAAC,CAAC;SAC5D;aAAM;YACL,OAAO,GAAG,IAAI,aAAa,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;YAC7C,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,EAAE,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,EAAC,EAAE;gBACpE,IAAI,EAAE,OAAO;gBACb,IAAI,EAAE,CAAC,QAAQ,CAAC,cAAc,EAAE,QAAQ,CAAC,aAAa,CAAC;aACxD,EACD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,QAAQ,EAAE,QAAQ,CAAC,OAAO,CAAC,EAAC,EAAE;gBAC5D,IAAI,EAAE,OAAO;gBACb,IAAI,EAAE,CAAC,QAAQ,CAAC,qBAAqB,EAAE,QAAQ,CAAC,oBAAoB,CAAC;aACtE,CAAC,CAAC;SACR;QAED,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;IACrE,CAAC;IAEM,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEqB,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;IChED;;;;;;;;;;;;;;;;aAsBgB,WAAW,CAAC,IAI3B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,EAAE,YAAC,CAAW;QACf,IAAA,6BAAU,EAAE,6BAAU,CAAU;QAEvC,OAAO,eAAe,CAAC,EAAC,CAAC,GAAA,EAAE,CAAC,GAAA,EAAE,UAAU,YAAA,EAAE,UAAU,YAAA,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;IAClE,CAAC;IAEM,IAAM,iBAAiB,GAAiB;QAC7C,UAAU,EAAEC,cAAW;QACvB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,WAA+B;KAC5C;;ICtCD;;;;;;;;;;;;;;;;IAqBA;QAaE,sBAAY,KAAe,EAAE,QAAkB;YAZ/C,kBAAa,GAAG,CAAC,QAAQ,CAAC,CAAC;YAO3B,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC;YAC5B,IAAI,CAA
C,IAAI,GAAG,QAAQ,CAAC,MAAM,CAAC;YAC5B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEhC,IAAI,CAAC,KAAK,GAAG,KAAK,CAAC;YACnB,IAAI,CAAC,QAAQ,GAAG,aAAW,iBAAiB,CAAC,KAAK,CAAC,MAAM,CAAC,OAAI,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,OAAO,CAAC;SAC1B;QAED,kCAAW,GAAX;YACE,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC3C,IAAM,YAAY,GAAGC,WAAS,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC1C,IAAI,QAAQ,CAAC;YACb,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;gBAC3B,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC;oBACnC,OAAO,sCAAsC,CAAC;iBAC/C,CAAC,CAAC;aACJ;iBAAM;gBACL,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC;oBACnC,OAAO,eAAa,MAAM,CAAC,CAAC,CAAC,0BAAqB,CAAC,mBAC/C,MAAM,CAAC,CAAC,CAAC,MAAG,CAAC;iBAClB,CAAC,CAAC;aACJ;YAED,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,0EAEf,KAAK,wEAErB,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,sDACe,YAAY,kCAGrD,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;2BACF;KAAA,IAAA;IAED,IAAM,MAAM,GAAG,CAAC,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,CAAC,CAAC;IAE9C,SAASA,WAAS,CAAC,IAAY;QAC7B,IAAI,IAAI,KAAK,CAAC,EAAE;YACd,OAAO,WAAW,CAAC;SACpB;aAAM,IAAI,IAAI,IAAI,CAAC,EAAE;YACpB,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,CAAC,UAAA,KAAK,IAAI,OAAA,eAAa,KAAO,GAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;SAC3E;aAAM;YACL,MAAM,KAAK,CAAC,sBAAoB,IAAI,0BAAuB,CAAC,CAAC;SAC9D;IACH;;aC/DgB,KAAK,CACjB,IAAsE;QAEjE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,mBAAK,EAAE,iBAAI,CAAU;QAEtB,IAAA,8DAA6D,EAA5D,cAAM,EAAE,aAAoD,CAAC;QACpEZ,aAAU,CAAC,iBAAiB,CAAC,CAAC,EAAE,MAAM,EAAE,KAAK,CAAC,CAAC;QAE/C,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,KAAK,QAAQ,EAAE;YAC3D,IAAM,WAAW,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YACpD,IAAM,SAAS,GAAG,YAAY,CAC1B,WAAW,CAAC,MAAoB,EAAE,MAAM,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;YACvE,OAAO,OAAO,CAAC,cAAc,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;SAC1D;QAED,IAAIX,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE;YACnC,OAAO,OAAO,CAAC,cAAc,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;SACnD;;QAGD,IAAM,OAAO,GAAG,IAAI,YAAY,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;QAChD,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,MAAM,EAAC,CAAC,CAAC;QACpD,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;IACtE,CAAC;IAEM,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEwB,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAyB;KACtC;;ICtDD;;;;;;;;;;;;;;;;IAyBO,IAAM,cAAc,GAAG,UAAC,IAI9B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,6BAAU,EAAE,mBAAK,CAAU;QAElCxB,OAAI,CAAC,MAAM,CACP,CAAC,CAAC,KAAK,CAAC,MAAM,IAAI,CAAC,EACnB,cAAM,OAAA,wDAAwD;YAC1D,iBAAiB,GAAA,CAAC,CAAC;QAC3B,IAAM,IAAI,GAAG,UAAU,CAAC,MAAM,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,CAAC,GAAG,CAAC,GAAA,CAAC,CAAC;QAEhD,IAAM,QAAQ,GAAGC,eAAY,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,CAAC,CAAC;QACrE,IAAM,QAAQ,GAAGA,eAAY,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,EAAE,UAAU,CAAC,MAAM,CAAC,CAAC;QAC9E,IAAM,gBAAgB,GAClBA,eAAY,CAAC,mBAAmB,CAAC,CAAC,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,CAAC,CAAC;QAChE,IAAM,gBAAgB,GAClBA,eAAY,CAAC,mBAAmB,CAAC,KAAK,EAAE,UAAU,CAAC,MAAM,CAAC,CAAC;QAC/D,IAAM,SAAS,GACXA,eAAY,CAAC,YAAY,CAAC,gBAAgB,EAAE,KAAK,EAAE,UAAU,CAAC,MAAM,CAAC,CAAC;QAE1E,IAAM,SAAS,GAAG,EAAE,CAAC;QAErB,IAAM,oBAAoB,GACtB,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QAC9D,IAAM,sBAAsB,GAAG,SAAS,CACpC,EAAC,MAAM,EAAE,EAA
C,CAAC,EAAE,oBAAoB,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QAC3E,IAAM,qBAAqB,GAAG,OAAO,CAAC;YACpC,MAAM,EAAE,EAAC,CAAC,EAAE,sBAAsB,EAAC;YACnC,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,KAAK,EAAE,gBAAgB,EAAC;SACjC,CAAC,CAAC;QACH,IAAM,MAAM,GAAG,KAAK,CAAC;YACnB,MAAM,EAAE,EAAC,CAAC,EAAE,qBAAqB,EAAC;YAClC,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,KAAK,EAAE,gBAAgB,EAAE,IAAI,EAAE,SAAS,EAAC;SAClD,CAAC,CAAC;QAEH,SAAS,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC;QACrC,SAAS,CAAC,IAAI,CAAC,sBAAsB,CAAC,CAAC;QACvC,SAAS,CAAC,IAAI,CAAC,qBAAqB,CAAC,CAAC;QAEtC,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QAEtD,OAAO,MAAM,CAAC;IAChB,CAAC,CAAC;IAEK,IAAM,oBAAoB,GAAiB;QAChD,UAAU,EAAEwB,iBAAc;QAC1B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,cAAkC;KAC/C;;IC/ED;;;;;;;;;;;;;;;;IAwBO,IAAM,QAAQ,GAAG,gBAAgB,CAAC;QACvC,SAAS,EAAE,YAAY,CAAC,SAAS;QACjC,KAAK,EAAE,MAAM;QACb,aAAa,EAAEC,eAAW;KAC3B,CAAC,CAAC;IAEI,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAEC,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,QAAQ;KACrB;;IClCD;;;;;;;;;;;;;;;;aAsBgB,IAAI,CAAC,IAAkD;QAE9D,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,oBAAK,CAAW;QACvB,IAAM,SAAS,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAEtD,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,SAAS,CAAC,kBAAkB,CAAC,IAAI,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;IAC7E,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICnCD;;;;;;;;;;;;;;;;aAsBgB,GAAG,CAAC,KAAiB,EAAE,OAAsB;QAC3D,IAAM,OAAO,GAAG,IAAI,cAAc,CAAC,KAAK,CAAC,KAAK,EAAE,WAAW,CAAC,MAAM,CAAC,CAAC;QACpE,IAAM,MAAM,GAAG,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,KAAK,CAAC,EAAE,OAAO,CAAC,CAAC;QACnE,OAAO,EAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,KAAK,EAAE,MAAM,CAAC,KAAK,EAAC,CAAC;IAC3E;;IC1BA;;;;;;;;;;;;;;;;aA4BgB,IAAI,CAChB,IAAoE;QAE/D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,mBAAK,CAAU;;QAGtB,IAAI,KAAK,KAAK,WAAW,EAAE;YACzB,IAAI,CAAC,CAAC,KAAK,KAAK,WAAW,EAAE;gBAC3B,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;aACzC;;YAGD,IAAM,WAAW,GAAGC,aAAE,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;YACtC,IAAM,MAAM,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,SAAS,EAAC,EAAC,CAAC,CAAC;YAEvE,IAAM,MAAM,GACR,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,WAAW,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAElE,WAAW,CAAC,OAAO,EAAE,CAAC;YACtB,OAAO,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YAEnC,OAAO,MAAM,CAAC;SACf;;QAGD,IAAI,CAAC,CAAC,KAAK,KAAK,WAAW,EAAE;YAC3B,IAAM,QAAQ,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACrD,IAAM,MAAM,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,OAAA,EAAC,EAAC,CAAC,CAAC;YACtE,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YACrC,OAAO,MAAM,CAAC;SACf;QAED,IAAI,CAAC7B,OAAI,CAAC,eAAe,CAAC,CAAC,CAAC,KAAK,EAAE,KAAK,CAAC,EAAE;;;YAGzC,IAAM,MAAM,GAAG,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAChD,OAAO,EAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,KAAK,OAAA,EAAC,CAAC;SAC5D;QAED,IAAI,KAAK,KAAK,OAAO,EAAE;YACrB,OAAO,GAAG,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC;SACxB;QAED,IAAI,KAAK,KAAK,MAAM,EAAE;YACpB,IAAM,eAAe,GAAG,OAAO,CAAC,cAAc,CAC1C,EAAE,EAAE,MAAM,EAAEA,OAAI,CAAC,sBAAsB,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC;YAExD,IAAM,YAAY,GAAiB,EAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,eAAe,EAAC,CAAC;YAE9D,IAAM,MAAM,GAAG,QAAQ,CAAC,EAAC,MAAM,EAAE,YAAY,EAAE,OAAO,SAAA,EAAC,CAAe,CAAC;YACvE,OAAO,CAAC,WAAW,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC;YAC5C,OAAO,MAAM,CAAC;SACf
;QAED,MAAM,IAAI,KAAK,CAAC,mCAAiC,CAAC,CAAC,KAAK,YAAO,KAAO,CAAC,CAAC;IAC1E,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAE8B,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;IC3FD;;;;;;;;;;;;;;;;IAsBO,IAAM,IAAI,GACb,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,IAAI,EAAE,aAAa,EAAE,WAAW,EAAC,CAAC,CAAC;IAErE,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;IC7BD;;;;;;;;;;;;;;;;IAqBA;QAYE,yBAAY,WAAqB;YATjC,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,6BAA6B,CAAC;YAGzC,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,WAAM,GAAG,IAAI,CAAC;YACd,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAChC,IAAI,CAAC,SAAS,GAAG,UAAU,CAAC;SAC7B;QAED,qCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,sdAetC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;8BACF;KAAA;;IC9DD;;;;;;;;;;;;;;;;IAqBA;QAYE,qBAAY,WAAqB;YATjC,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,6BAA6B,CAAC;YAGzC,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAGrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC;SACzB;QAED,iCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,uTAUtC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;0BACF;KAAA;;ICzDD;;;;;;;;;;;;;;;;aAwBgB,WAAW,CAAC,IAI3B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iCAAY,EAAE,iCAAY,CAAU;QAE3C,IAAI,OAAoC,CAAC;QACzC,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,YAAY,CAAC,EAAC;YACvC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,YAAY,CAAC,EAAC;SACxC,CAAC;QACF,IAAI/B,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE;YACzC,OAAO,GAAG,IAAI,eAAe,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;SACxC;aAAM;YACL,OAAO,GAAG,IAAI,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;SACpC;QACD,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;IACtE,CAAC;IAEM,IAAM,iBAAiB,GAAiB;QAC7C,UAAU,EAAEgC,cAAW;QACvB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,WAA+B;KAC5C;;IClDD;;;;;;;;;;;;;;;;IAuBA;QAYE,uBAAY,MAA+B;YAN3C,aAAQ,GAAG,EAAE,CAAC;YACd,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAIV,IAAI,CAAC,WAAW;gBACZ/B,eAAY,CAAC,eAAe,CAAC,MAAM,EAAE,CAAC,YAAgC,CAAC;YAC3E,IAAI,CAAC,aAAa,GAAG,MAAM,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,MAAI,CAAG,GAAA,CAAC,CAAC;YACnD,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEhC,IAAI,CAAC,YAAY,GAAG,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC;YACtC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,YAAY,EAAE,CAAC,EAAE,EAAE;gBAC1C,IAAI,CAAC,QAAQ,IAAI,WAAS,CAAC,YAAS,CAAC;aACtC;YACD,IAAI,CAAC,SAAS,GAAG,QAAQ,CAAC;SAC3B;QAED,mCAAW,GAAX;YACE,IAAM,QAAQ,GAAa,EAAE,CAAC;YAC9B,IAAI,IAAI,CAAC,YAAY,GAAG,CAAC,EAAE;gBACzB,QAAQ,CAAC,IAAI,CACT,qFAAqF,CAAC,CAAC;gBAC3F,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,YAAY,EAAE,CAAC,EAAE,EAAE;oBAC1C,QAAQ,CAAC,IAAI,CACT,kCAAgC,CAAC,CAAC,CAAC,QAAK;yBACxC,+CACI,CAAC,kCAA4B,CAAC,GAAG,CAAC,WAAO,CAAA,CAAC,CAAC;iBACpD;gBACD,IAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC;gBACpC,IAAM,cAAc,GAAG,IAAI,CAAC,YAAY,GAAG,CAAC,CAAC;gBAC7C,QAAQ,CAAC,IAAI,CAAC,sDACV,SAAS,i
CAA4B,cAAc,UAAO,CAAC,CAAC;aACjE;iBAAM;gBACL,QAAQ,CAAC,IAAI,CAAC,uDAAuD,CAAC,CAAC;aACxE;YAED,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,qCACd,IAAI,CAAC,aAAa,0DACX,IAAI,CAAC,aAAa,iMAMxC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,4CAIpC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA;;ICtFD;;;;;;;;;;;;;;;;aAsBgB,IAAI,CAAC,IAAkD;QAE9D,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,oBAAK,CAAW;QACvB,IAAM,SAAS,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QAEtD,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,SAAS,CAAC,kBAAkB,CAAC,IAAI,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;IAC7E,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEgC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICnCD;;;;;;;;;;;;;;;;aA4BgB,UAAU,CACtB,MAAoB,EAAE,IAAY,EAAE,OAAsB;QAC5D,IAAM,KAAK,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;QAC9B,IAAI,KAAK,KAAK,WAAW,EAAE;YACzB,IAAM,KAAK,GAAG,MAAM,CAAC,GAAG,CAAC,UAAC,CAAC,IAAK,OAAA,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,GAAA,CAAC,CAAC;YACrE,IAAM,KAAK,GAAG,MAAM,CAAC,GAAG,CAAC,UAAC,CAAC,IAAK,OAAA,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,GAAA,CAAC,CAAC;YAErE,IAAM,YAAY,GAAG,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,OAAO,CAAC,CAAC;YACtD,IAAM,YAAY,GAAG,UAAU,CAAC,KAAK,EAAE,IAAI,EAAE,OAAO,CAAC,CAAC;YAEtD,IAAM,MAAM,GACR,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,YAAY,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAEzE,KAAK,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;YAClD,KAAK,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;YAClD,OAAO,CAAC,WAAW,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;YACzC,OAAO,CAAC,WAAW,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;YAEzC,OAAO,MAAM,CAAC;SACf;QAED,IAAI,QAAQ,GAAG,OAAO,CAAC,kBAAkB,CAAC,MAAM,CAAC,CAAC;;;;;;;QAQlD,IAAI,KAAK,KAAK,QAAQ,EAAE;YACtB,QAAQ,GAAG,IAAI,CAAC;SACjB;QAED,IAAI,QAAQ,EAAE;;;;;;;;YAQZ,IAAM,WAAS,GAAG,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC;gBAC5B,IAAM,SAAS,GAAGjC,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC;gBAC1D,IAAM,KAAK,GAAG,CAAC,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC;gBAC9B,OAAO,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,OAAA,EAAC,EAAC,CAAC,CAAC;aAC3D,CAAC,CAAC;YAEH,IAAM,eAAe,GAAG,WAAS,CAAC,GAAG,CAAC,UAAA,CAAC;gBACrC,OAAO,EAAC,IAAI,EAAE,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAC,CAAC;aAC3D,CAAC,CAAC;;YAGH,IAAM,UAAQ,GACVC,eAAY,CAAC,eAAe,CAAC,WAAS,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,EAAE,CAAC,YAAY,CAAC;YAC5E,IAAM,YAAY,GAAG,WAAS,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;YACjD,IAAM,OAAO,GACT,aAAa,CAAC,eAAe,EAAE,UAAQ,EAAE,KAAK,EAAE,YAAY,CAAC,CAAC;YAElE,IAAM,aAAa,GACfA,eAAY,CAAC,eAAe,CAAC,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,EAAE,IAAI,CAAC,CAAC;YAEjE,IAAM,OAAO,GAAG,OAAO,CAAC,cAAc,CAAC,aAAa,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;YAEtE,WAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;YAEtD,OAAO,OAAO,CAAC;SAChB;QAEK,IAAA,4CAA+D,EAA9D,wBAAS,EAAE,sBAAmD,CAAC;QACtE,IAAM,MAAM,GAAG,CAAC,SAAS,EAAE,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAyB,GAAA,CAAC,CAAC;QACjE,IAAM,OAAO,GAAG,IAAI,aAAa,CAAC,MAAM,CAAC,CAAC;QAE1C,IAAM,WAAW,GAA0C,EAAE,CAAC;QAC9D,IAAM,OAAO,GAAa,IAAI,KAAK,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;QACvD,IAAI,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE;YACtB,OAAO,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YAC1B,WAAW,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAC,CAAC,CAAC;YACtD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,
EAAE,EAAE;gBACvC,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;gBAC3C,WAAW,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,EAAC,CAAC,CAAC;aACvD;SACF;QAED,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAChC,OAAO,EAAE,SAAS,EAAE,SAAS,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;QACzD,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QAEtD,IAAM,cAAc,GAChB,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QACnE,OAAO,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;QAChC,OAAO,cAAc,CAAC;IACxB,CAAC;IAED,SAAS,gBAAgB,CACrB,MAAoB,EAAE,IAAY,EAAE,OAAsB;QAC5D,IAAM,QAAQ,GAAGA,eAAY,CAAC,eAAe,CAAC,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,EAAE,IAAI,CAAC,CAAC;QAC9E,IAAM,SAAS,GAAG,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC;YACX,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAC;YACd,OAAO,SAAA;YACP,KAAK,EAAE;gBACL,KAAK,EAAE;oBACLD,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC;oBAC1CA,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;iBACxC;aACF;SACF,CAAC,GAAA,CAAC,CAAC;QAEjC,OAAO,EAAC,SAAS,WAAA,EAAE,QAAQ,UAAA,EAAC,CAAC;IAC/B;;ICxIA;;;;;;;;;;;;;;;;aAwBgB,MAAM,CAClB,IAAwE;QAEnE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,iBAAI,CAAU;QAErB,IAAM,KAAK,GAAGA,OAAI,CAAC,cAAc,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;QAC5D,IAAM,QAAQ,GACVC,eAAY,CAAC,eAAe,CAAC,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,EAAE,KAAK,CAAC,CAAC;QAClE,IAAID,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,KAAK,CAAC,EAAE;YACtC,OAAO,OAAO,CAAC,cAAc,CAAC,QAAQ,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;SAC9D;;QAGD,IAAM,OAAO,GAAG,MAAM,CAAC,MAAM,CAAC,UAAA,CAAC,IAAI,OAAAA,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,GAAA,CAAC,CAAC;QACpE,IAAI,OAAO,CAAC,MAAM,KAAK,CAAC,EAAE;YACxB,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,CAAC,CAAC,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;SACrD;QAED,IAAM,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,CAAC;QACzCC,eAAY,CAAC,sBAAsB,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;QAEnD,OAAO,UAAU,CAAC,OAAO,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;IAC7C,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEiC,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;IC7BD;QAuBE,6BACI,QAAiC,EAAE,OAAe,EAClD,UAA0C,EAC1C,yBAAiC,EAAE,iBAAyB;;YAFzB,wBAAA,EAAA,eAAe;YAClD,2BAAA,EAAA,iBAA0C;YAC1C,0CAAA,EAAA,iCAAiC;YAAE,kCAAA,EAAA,yBAAyB;YArBhE,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GACJ,6IACkD,CAAC;YACvD,kBAAa,GAA6B,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAEpD,WAAM,GAAG,IAAI,CAAC;YAgBZ,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YAErClC,OAAI,CAAC,MAAM,CACP,QAAQ,CAAC,UAAU,KAAK,cAAc,EACtC,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;YACzC,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;;YAElD,IAAI,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;gBAC7B,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;aACpC;iBAAM;gBACL,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;aACpC;YACD,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,IAAI,CAAC,iBAAiB,CAAC,CAAC;YAC5B,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAC3D,IAAI,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC3C,IAAI,IAAI,CAAC,OAAO,EAAE;gBAChB,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YA
ED,IAAI,IAAI,CAAC,yBAAyB,EAAE;gBAClC,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,IAAI,CAAC,iBAAiB,EAAE;gBAC1B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;aAC3C;YAED,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC;gBACvC,CAAC;gBACD,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC;YACtD,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC;YACpE,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,UAAU,CAAC;YACjC,kCAA2C,EAA1C,iBAAS,EAAE,iBAAS,CAAuB;YAC5C,IAAI,CAAC,SAAS,GAAG,kBAAgB,IAAI,CAAC,UAAU,SAAI,IAAI,CAAC,IAAI,SACzD,IAAI,CAAC,IAAI,SAAI,IAAI,CAAC,iBAAmB,CAAC;SAC3C;QAED,yCAAW,GAAX;YACE,IAAM,SAAS,GAAG,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,SAAS,CAAC,CAAC;YACpD,IAAM,SAAS,GAAG,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,UAAU,CAAC,CAAC;YACpD,IAAM,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;YAC5D,IAAM,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;YACtC,IAAM,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC,YAAY,GAAG,IAAI,CAAC,QAAQ,CAAC,WAAW;gBACnE,IAAI,CAAC,QAAQ,CAAC,UAAU,CAAC;YAC7B,OAAO;gBACL,uBAAuB,CAAC,SAAS,EAAE,CAAC,SAAS,EAAE,QAAQ,CAAC,CAAC;gBACzD,uBAAuB,CAAC,SAAS,EAAE,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC;aAC1D,CAAC;SACH;;QAGD,qDAAuB,GAAvB,UAAwB,KAAa;YACnC,OAAO,kBACH,KAAK,iFACY,KAAK,oBAAe,KAAK,kCAC7B,KAAK,oBAAe,KAAK,8BAC7B,KAAK,wBAAmB,KAAK,mCACrB,KAAK,sCACR,KAAK,wNAKP,KAAK,wBAAmB,KAAK,yCACtB,KAAK,kDACE,KAAK,sBAAiB,KAAK,8CAC3B,KAAK,kDACL,KAAK,qBAAgB,KAAK,+CAC1B,KAAK,kDACL,KAAK,oBAAe,KAAK,iCAGtD,CAAC;SACH;QAED,yCAAW,GAAX;YACE,IAAM,YAAY,GAAG,0BAA0B,CAC3C,IAAI,CAAC,iBAAiB,EAAE,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,UAAU,EACxD,IAAI,CAAC,SAAS,CAAC,CAAC;YAEpB,IAAM,SAAS,GAAG,IAAI,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,CAAC;;YAE/C,IAAM,gBAAgB,GAAG,SAAS,KAAK,CAAC;gBACpC,oTAKgC;gBAChC,2CACI,IAAI,CAAC,uBAAuB,CAAC,CAAC,CAAC,yNAK3B,IAAI,CAAC,uBAAuB,CAAC,CAAC,CAAC,2TASpC,CAAC;YAER,IAAM,YAAY,GAAG,0kBAWf,gBAAgB,8BACF,CAAC;YAErB,IAAM,OAAO,GAAG,IAAI,CAAC,IAAI;gBACrB,KAAG,YAAc;gBACjB,uEACI,YAAY,4DAGf,CAAC;YAEN,IAAM,OAAO,GAAG,IAAI,CAAC,IAAI;gBACrB,+CAA+C;gBAC/C,oNAIC,CAAC;YACN,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GACd,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC;gBAC/D,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,wJAEA,YAAY,gBACd,CAAC;iBACJ;qBAAM,IAAI,IAAI,CAAC,iBAAiB,EAAE;oBACjC,iBAAiB,GAAG,gIAEhB,YAAY,gBACd,CAAC;oBACH,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;iBAChD;qBAAM;oBACL,iBAAiB,GAAG,4FAEhB,YAAY,gBACd,CAAC;iBACJ;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YAED,IAAM,cAAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,eACX,iBAAiB,gMAKf,OAAO,iHAIP,OAAO,2cAaL,cAAc,sBACd,sBAAsB,oJAK1B,YAAY,aACf,CAAC;YACJ,OAAO,QAAQ,CAAC;SACjB;kCACF;KAAA;;ICrOD;QAkBE,yBACI,QAAiC,EAAE,OAAe,EAClD,UAA0C,EAC1C,yBAAiC;;YAFE,wBAAA,EAAA,eAAe;YAClD,2BAAA,EAAA,iBAA0C;YAC1C,0CAAA,EAAA,iCAAiC;YAhBrC,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GACJ,sIAAsI,CAAC;YAezI,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YACrC,IAAI,CAAC,cAAc,GAAG,QAAQ,CAAC,UAAU,KAAK,cAAc,CAAC;YAC7D,IAAI,CAAC,cAAc,GAAG,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC;gBAC3B,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YACxE,IAAI,CAAC,aAAa;gBACd,6BAA6B,CAAC,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,CAAC,CAAC;YACzE,IAAI,CAAC,iBAAiB;gBAClB,6BAA6B,CAAC,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,CAAC,CAAC;YAEzE,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,IAAI,CAAC,iBAA
iB,CAAC,CAAC;YAE5B,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YAED,IAAI,yBAAyB,EAAE;gBAC7B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YACD,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAE3D,kCAA2C,EAA1C,iBAAS,EAAE,iBAAS,CAAuB;YAC5C,IAAI,CAAC,SAAS,GAAG,cAAY,IAAI,CAAC,iBAAiB,SAAI,IAAI,CAAC,UAAU,SAClE,IAAI,CAAC,IAAI,SAAI,IAAI,CAAC,IAAI,SAAI,IAAI,CAAC,cAAgB,CAAC;SACrD;QAED,qCAAW,GAAX;YACE,IAAM,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC;YACrE,IAAM,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,CAAC;YACrE,IAAM,SAAS,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,CAAC;YACpEA,OAAI,CAAC,MAAM,CACP,SAAS,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC;gBACnC,SAAS,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,EAC3C;;gBAEA,OAAA,mEAAmE;aAAA,CAAC,CAAC;YACzE,IAAM,SAAS,GAAG,CAAC,UAAU,EAAE,SAAS,CAAC,CAAC;YAC1C,IAAM,SAAS,GAAG,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;YAC1C,IAAM,SAAS,GAAG,IAAI,CAAC,QAAQ,CAAC,SAAS,GAAG,IAAI,CAAC,QAAQ,CAAC,QAAQ,CAAC;YACnE,IAAM,SAAS,GAAG,IAAI,CAAC,QAAQ,CAAC,WAAW,CAAC;YAC5C,IAAM,QAAQ,GAAG,IAAI,CAAC,QAAQ,CAAC,YAAY,GAAG,IAAI,CAAC,QAAQ,CAAC,WAAW;gBACnE,IAAI,CAAC,QAAQ,CAAC,UAAU,CAAC;YAE7B,OAAO;gBACL,uBAAuB,CAAC,SAAS,EAAE,CAAC,SAAS,EAAE,QAAQ,CAAC,CAAC;gBACzD,uBAAuB,CAAC,SAAS,EAAE,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC;aAC1D,CAAC;SACH;QAED,qCAAW,GAAX;YACE,IAAM,aAAa,GAAG,IAAI,CAAC,cAAc,GAAG,yEAE3C;gBAC2C,yEAE3C,CAAC;YAEF,IAAM,eAAe,GAAG,IAAI,CAAC,cAAc,GAAG,gHAM7C;gBAC6C,gHAM7C,CAAC;YAEF,IAAM,YAAY,GACd,sBAAsB,CAAC,IAAI,CAAC,iBAAiB,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAEvE,IAAM,YAAY,GAAG,qEAGjB,IAAI,CAAC,cAAc,GAAG,sBAAsB,GAAG,sBAAsB,wYAQvE,aAAa,8PAMH,CAAC;YAEb,IAAM,OAAO,GAAG,IAAI,CAAC,IAAI;gBACrB,KAAG,YAAc;gBACjB,uEACA,YAAY,mCAGf,CAAC;YAEF,IAAM,OAAO,GAAG,IAAI,CAAC,IAAI;gBACrB,2CAA2C;gBAC3C,gLAIL,CAAC;YAEA,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GAAG,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,KAAK,CAAC,CAAC;gBAC1E,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,2JAEQ,YAAY,wBACd,CAAC;iBACZ;qBAAM;oBACL,iBAAiB,GAAG,oGAEN,YAAY,4CAEjB,CAAC;iBACX;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YAED,IAAM,cAAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,WACf,iBAAiB,0HAGf,OAAO,+FAIP,OAAO,6LAOP,IAAI,CAAC,cAAc,GAAG,sBAAsB,GAAG,sBAAsB,kBACrE,eAAe,gBACf,cAAc,gBACd,sBAAsB,+FAGxB,YAAY,SACf,CAAC;YACA,OAAO,QAAQ,CAAC;SACjB;8BACF;KAAA;;IChND;;;;;;;;;;;;;;;;IAwBA;QAcE,4BACI,QAAiC,EAAE,OAAe,EAClD,UAA0C,EAC1C,yBAAiC;YAFE,wBAAA,EAAA,eAAe;YAClD,2BAAA,EAAA,iBAA0C;YAC1C,0CAAA,EAAA,iCAAiC;YAZrC,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GACJ,oFAAoF,CAAC;YACzF,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAUpD,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YACrC,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/DA,OAAI,CAAC,MAAM,CACP,QAAQ,CAAC,UAAU,KAAK,cAAc,EACtC,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;YACzC,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YAED,IAAI,yBAAyB,EAAE;gBAC7B,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,yBAAyB,GAAG,yBAAyB,CAAC;YAE3D,IAAI,CAAC,SAAS,GAAG,iBAAe,IAAI,CAAC,UAAY,CAAC;SACnD;QAED,wCAAW,GAAX;YACE,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GAAG,4BAA4B
,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;gBACnE,IAAI,IAAI,CAAC,yBAAyB,EAAE;oBAClC,iBAAiB;wBACb,qJAEK,YAAY,qBACd,CAAC;iBACT;qBAAM;oBACL,iBAAiB,GAAG,mGAEN,YAAY,4CAEjB,CAAC;iBACX;gBAED,sBAAsB,GAAG,sCAAsC,CAAC;aACjE;YAED,IAAM,cAAc,GAChB,IAAI,CAAC,OAAO,GAAG,kDAAkD,GAAG,EAAE,CAAC;YAE3E,IAAM,QAAQ,GAAG,aACb,iBAAiB,2xBAoBb,cAAc,oBACd,sBAAsB,kGAK1B,mBAAmB,EAAE,i4BAqBxB,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;iCACF;KAAA;;IC7ID;;;;;;;;;;;;;;;;IAqBA;QAcE,uBAAY,WAAqB,EAAE,cAAuB;YAb1D,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GACJ,6HACkB,CAAC;YAKvB,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAChC,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;YACrC,IAAI,CAAC,SAAS,GAAG,YAAU,IAAI,CAAC,cAAgB,CAAC;SAClD;QAED,mCAAW,GAAX;YACE,IAAM,MAAM,GAAG,IAAI,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC;YAC3C,IAAM,MAAM,GAAG,IAAI,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC;YAE3C,IAAM,QAAQ,GAAG,WACf,iCAAiC,EAAE,mCAEhB,IAAI,CAAC,aAAa,wDACT,IAAI,CAAC,aAAa,kaAWhB,MAAM,kWAMJ,MAAM,uKAQzC,CAAC;YACA,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA;;ICzCD;IACA;IACA;IACA,SAAS,cAAc,CAAC,EAST;YARb,QAAC,EACD,kBAAM,EACN,sBAAQ,EACR,oBAAO,EACP,YAAW,EAAX,gCAAW,EACX,8BAA6B,EAA7B,kDAA6B,EAC7B,sBAAkB,EAAlB,uCAAkB,EAClB,kBAAiB,EAAjB,sCAAiB;QAEjB,IAAM,cAAc,GAAG,QAAQ,CAAC,UAAU,KAAK,cAAc,CAAC;QAC9D,IAAM,UAAU,GAAG,cAAc,GAAG,KAAK,GAAG,IAAI,CAAC;QACjD,IAAM,UAAU,GAAG,KAAK,CAAC;QAEzB,IAAM,QAAQ,GAAG,cAAc;YAC3B,QAAQ,CAAC,YAAY,KAAK,QAAQ,CAAC,QAAQ;YAC3C,QAAQ,CAAC,WAAW,KAAK,QAAQ,CAAC,OAAO;YACzC,QAAQ,CAAC,OAAO,CAAC,IAAI,KAAK,OAAO,CAAC;QACtC,IAAI,SAAS,CAAC;QACd,IAAI,cAAc,CAAC;QAEnB,IAAI,QAAQ,EAAE;YACZ,IAAM,SAAS,GACX,QAAQ,CAAC,QAAQ,GAAG,QAAQ,CAAC,OAAO,GAAG,QAAQ,CAAC,UAAU,CAAC;YAC/D,SAAS,GAAG,OAAO,CAAC;gBAClB,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC;gBACX,OAAO,SAAA;gBACP,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,EAAE,QAAQ,CAAC,SAAS,EAAE,SAAS,CAAC,EAAC;aACnD,CAAC,CAAC;YACH,cAAc,GAAG,OAAO,CAAC;gBACvB,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC;gBACnB,OAAO,SAAA;gBACP,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,EAAE,SAAS,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;aACrD,CAAC,CAAC;SACJ;aAAM;YACL,SAAS,GAAG,OAAO,CAAC;gBAClB,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC;gBACX,OAAO,SAAA;gBACP,KAAK,EAAE;oBACL,KAAK,EAAE,cAAc;wBACjB;4BACE,QAAQ,CAAC,SAAS,EAAE,QAAQ,CAAC,QAAQ,GAAG,QAAQ,CAAC,OAAO;4BACxD,QAAQ,CAAC,UAAU;yBACpB;wBACD;4BACE,QAAQ,CAAC,SAAS,EAAE,QAAQ,CAAC,UAAU;4BACvC,QAAQ,CAAC,QAAQ,GAAG,QAAQ,CAAC,OAAO;yBACrC;iBACN;aACF,CAAC,CAAC;YACH,cAAc,GAAG,OAAO,CAAC;gBACvB,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC;gBACnB,OAAO,SAAA;gBACP,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,EAAE,QAAQ,CAAC,UAAU,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;aAC/D,CAAC,CAAC;SACJ;QAED,IAAM,MAAM,GAAG,eAAe,CAAC;YAC7B,CAAC,EAAE,cAAc,GAAG,SAAS,GAAG,cAAc;YAC9C,CAAC,EAAE,cAAc,GAAG,cAAc,GAAG,SAAS;YAC9C,UAAU,YAAA;YACV,UAAU,YAAA;YACV,OAAO,SAAA;YACP,IAAI,MAAA;YACJ,UAAU,YAAA;YACV,sBAAsB,wBAAA;YACtB,cAAc,gBAAA;SACf,CAAC,CAAC;QACH,IAAM,GAAG,GAAG,OAAO,CACf,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,CAAC,QAAQ,EAAC,EAAC,CAAC,CAAC;QAEvE,OAAO,CAAC,WAAW,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC;QACtC,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;QAC3C,OAAO,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAEnC,OAAO,GAAG,CAAC;IACb,CAAC;IAED;IACA;IACA,SAAS,gBAAgB,CAAC,EASX;;YARb,QAAC,EACD,kBAAM,EACN,sBAAQ,EACR,oBAAO,EACP,YAAW,EAAX,gCAAW,EACX,8BAA6B,EAA7B,kDAA6B,EAC7B,sBAAkB,EAAlB,uCAAkB,EAClB,kBAAiB,EAAjB,sCAAiB;;;;;;;QASf,IAAA,kCAAW,EACX,oCAAY,EACZ,g
CAAU,EACV,kCAAW,EACX,oCAAY,EACZ,0BAAO,EACP,4BAAQ,EACR,8BAAS,EACT,sCAAa,EACb,wCAAc,EACd,gCAAU,CACC;QAEb,IAAM,cAAc,GAAG,UAAU,KAAK,cAAc,CAAC;QAErD,IAAM,SAAS,GAAG,WAAW,GAAG,YAAY,GAAG,UAAU,CAAC;QAC1D,IAAM,OAAO,GAAG,SAAS,GAAG,QAAQ,CAAC;QACrC,IAAM,UAAU,GAAG,CAAC,OAAO,EAAE,SAAS,CAAC,CAAC;QACxC,IAAM,UAAU,GAAG,KAAK,CAAC;QACzB,IAAM,UAAU,GAAG,KAAK,CAAC;QAEzB,IAAM,aAAa,GAAiB,EAAE,CAAC;QAEvC,IAAM,SAAS,GACX,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,EAAC,EAAC,CAAC,CAAC;QACtE,IAAM,KAAK,GAAG,OAAO,CACjB,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,EAAE,SAAS,EAAE,CAAC,CAAC,CAAC,EAAC,EAAC,CAAC,CAAC;QAExE,aAAa,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;QAC9B,aAAa,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;QAE1B,IAAM,aAAa,GAAG,IAAI,aAAa,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;QACpE,IAAM,UAAU,GAAG;YACjB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,IAAI,EAAE,OAAO,CAAC,GAAG,CAAC,EAAC;YAClD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,WAAW,EAAE,YAAY,CAAC,EAAC;YAClD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,aAAa,EAAE,cAAc,CAAC,EAAC;YACtD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,EAAC;YACjC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,UAAU,GAAG,WAAW,CAAC,EAAC;YACjD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,UAAU,CAAC,EAAC;SACpC,CAAC;QACF,IAAM,MAAM,GAAG,OAAO,CAAC,gBAAgB,CACnC,aAAa,EAAE,CAAC,SAAS,CAAC,EAAE,SAAS,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;QAC7D,IAAM,QAAQ,GAAG,OAAO,CAAC;YACvB,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC;YACnB,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC,CAAC,EAAC;SAClD,CAAC,CAAC;QACH,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QAC3B,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAC7B,IAAM,QAAQ,GAA6B,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;QAC7E,IAAM,aAAa,GAAG,IAAI,mBAAmB,CACzC,QAAQ,EAAE,CAAC,CAAC,EAAE,OAAO,EAAE,QAAQ,CAAC,WAAW,CAAC,EAC5CD,MAAG,EAAE,CAAC,GAAG,CAAC,+BAA+B,CAAW,EAAE,IAAI,EAAE,IAAI,EAChE,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,UAAU,EAAE,sBAAsB,CAAC,CAAC;QACtE,IAAM,SAAS,GAAG,QAAQ,CAAC,CAAC,CAAC,CAAC;QAC9B,IAAM,QAAQ,GAAG,QAAQ,CAAC,CAAC,CAAC,CAAC;QAC7B,IAAM,SAAS,GAAG,QAAQ,CAAC,WAAW,CAAC;QACvC,IAAM,gBAAgB,GAAG;YACvB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;YACtE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,EAAC;SAClC,CAAC;QACF,IAAM,MAAM,GAAiB,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;QAC/C,IAAI,IAAI,EAAE;YACR,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACnB;QACD,IAAI,sBAAsB,EAAE;YAC1B,MAAM,CAAC,IAAI,CAAC,sBAAsB,CAAC,CAAC;SACrC;QACD,IAAI,UAAU,KAAK,WAAW,EAAE;YAC9B,UAAU,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,cAAc,CAAC,EAAC,CAAC,CAAC;YAC3D,aAAa,CAAC,QAAQ,IAAI,eAAe,CAAC;SAC3C;QACD,IAAM,MAAM,GAAe,OAAO,CAAC,gBAAgB,CAC/C,aAAa,EAAE,MAAM,EAAE,QAAQ,CAAC,KAAK,EAAE,gBAAgB,CAAC,CAAC;QAE7D,IAAM,QAAQ,GAAG,cAAc;YAC3B,CAAC,CAAC,EAAE,SAAS,EAAE,QAAQ,EAAE,QAAQ,CAAC,WAAW,CAAC;YAC9C,CAAC,CAAC,EAAE,QAAQ,CAAC,WAAW,EAAE,SAAS,EAAE,QAAQ,CAAC,CAAC;QACnD,IAAM,GAAG,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;QAE9E,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;;YAC3B,KAAgB,IAAA,kBAAAK,SAAA,aAAa,CAAA,4CAAA,uEAAE;gBAA1B,IAAM,CAAC,0BAAA;gBACV,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;aAC/B;;;;;;;;;QAED,OAAO,GAAG,CAAC;IACb,CAAC;aAEe,UAAU,CAAC,EASZ;YARb,QAAC,EACD,kBAAM,EACN,sBAAQ,EACR,oBAAO,EACP,YAAW,EAAX,gCAAW,EACX,8BAA6B,EAA7B,kDAA6B,EAC7B,sBAAkB,EAAlB,uCAAkB,EAClB,kBAAiB,EAAjB,sCAAiB;QAEjB,IAAM,OAAO,GAAG,IAAI,IAAI,IAAI,CAAC;QAC7B,IAAM,yBAAyB
,GAAG,sBAAsB,IAAI,IAAI,CAAC;QACjE,IAAM,cAAc,GAAG,QAAQ,CAAC,UAAU,KAAK,cAAc,CAAC;QAC9D,IAAI,OAA+D,CAAC;QACpE,IAAM,QAAQ,GAAG,cAAc;YAC3B,QAAQ,CAAC,YAAY,KAAK,QAAQ,CAAC,QAAQ;YAC3C,QAAQ,CAAC,WAAW,KAAK,QAAQ,CAAC,OAAO;YACzC,QAAQ,CAAC,OAAO,CAAC,IAAI,KAAK,OAAO,CAAC;QACtC,IAAI,QAAQ;aACP,QAAQ,CAAC,YAAY,KAAK,CAAC,IAAI,QAAQ,CAAC,WAAW,KAAK,CAAC;gBACzD,QAAQ,CAAC,cAAc,KAAK,CAAC,IAAI,QAAQ,CAAC,aAAa,KAAK,CAAC;gBAC7D,QAAQ,CAAC,YAAY,KAAK,CAAC,IAAI,QAAQ,CAAC,WAAW,KAAK,CAAC;iBACxD,QAAQ,CAAC,OAAO,CAAC,IAAI,KAAK,MAAM;oBAChC,QAAQ,CAAC,OAAO,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC,EAAE;YACzC,OAAO,cAAc,CAAC;gBACpB,CAAC,GAAA;gBACD,MAAM,QAAA;gBACN,QAAQ,UAAA;gBACR,OAAO,SAAA;gBACP,IAAI,MAAA;gBACJ,UAAU,YAAA;gBACV,sBAAsB,wBAAA;gBACtB,cAAc,gBAAA;aACf,CAAC,CAAC;SACJ;QAED,IAAIL,MAAG,EAAE,CAAC,OAAO,CAAC,oCAAoC,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE;YAC3EC,OAAI,CAAC,MAAM,CAAC,cAAc,EAAE,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;YACjE,OAAO,gBAAgB,CAAC;gBACtB,CAAC,GAAA;gBACD,MAAM,QAAA;gBACN,QAAQ,UAAA;gBACR,OAAO,SAAA;gBACP,IAAI,MAAA;gBACJ,sBAAsB,wBAAA;gBACtB,cAAc,gBAAA;gBACd,UAAU,YAAA;aACX,CAAC,CAAC;SACJ;QACD,IAAM,QAAQ,GAAGD,MAAG,EAAE,CAAC,OAAO,CAAC,yBAAyB,CAAC,CAAC;QAC1D,IAAM,OAAO,GACT,CAAC,QAAQ,CAAC,UAAU,GAAG,CAAC,KAAK,CAAC;aAC5B,QAAQ,CAAC,UAAU,KAAK,CAAC,IAAI,QAAQ,CAAC,OAAO,CAAC,IAAI,KAAK,OAAO,CAAC;YACjE,QAAQ,CAAC,WAAW,GAAG,CAAC,KAAK,CAAC,IAAI,cAAc,CAAC;QAErD,IAAM,OAAO,GAAG,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,EAAE,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;QAC9D,IAAM,UAAU,GAAG;YACjB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;YACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,WAAM,OAAO,CAAC,EAAC;YACnC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;YACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,cAAc,EAAE,QAAQ,CAAC,aAAa,CAAC,EAAC;SACzE,CAAC;QACF,IAAI,QAAQ,EAAE;YACZC,OAAI,CAAC,MAAM,CAAC,cAAc,EAAE,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;;YAEjE,OAAO,GAAG,IAAI,kBAAkB,CAC5B,QAAQ,EAAE,OAAO,EAAE,UAAU,EAAE,yBAAyB,CAAC,CAAC;SAC/D;aAAM;YACL,IAAI,OAAO,EAAE;gBACX,OAAO,GAAG,IAAI,mBAAmB,CAC7B,QAAQ,EAAE,OAAO,EAAE,UAAU,EAAE,yBAAyB,CAAC,CAAC;aAC/D;iBAAM;gBACL,OAAO,GAAG,IAAI,eAAe,CACzB,QAAQ,EAAE,OAAO,EAAE,UAAU,EAAE,yBAAyB,CAAC,CAAC;aAC/D;YAED,IAAM,SAAS,GAAG,QAAQ,CAAC,SAAS,GAAG,QAAQ,CAAC,QAAQ,CAAC;YACzD,IAAM,SAAS,GAAG,QAAQ,CAAC,WAAW,CAAC;YACvC,IAAM,QAAQ,GACV,QAAQ,CAAC,YAAY,GAAG,QAAQ,CAAC,WAAW,GAAG,QAAQ,CAAC,UAAU,CAAC;YACvE,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EACtE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,EAAC,CAAC,CAAC;SACxC;QAED,IAAM,QAAQ,GAAiB,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC;QAC3C,IAAI,OAAO,EAAE;YACX,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACrB;QACD,IAAI,yBAAyB,EAAE;YAC7B,QAAQ,CAAC,IAAI,CAAC,sBAAsB,CAAC,CAAC;SACvC;QACD,IAAI,UAAU,KAAK,WAAW,EAAE;YAC9B,UAAU,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,cAAc,CAAC,EAAC,CAAC,CAAC;YAC3D,OAAO,CAAC,QAAQ,IAAI,eAAe,CAAC;SACrC;QACD,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,QAAQ,EAAE,CAAC,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;IAC1E;;ICrUA;;;;;;;;;;;;;;;;aAuBgB,MAAM,CAClB,IAAwE;QACnE,IAAA,oBAAM,EAAE,kBAAK,EAAE,sBAAO,CAAS;QAC/B,IAAA,YAAC,EAAE,sBAAM,CAAW;QACpB,IAAA,uBAAO,EAAE,eAAG,EAAE,6BAAU,EAAE,2BAAS,EAAE,uCAAe,CAAU;QACrE,IAAM,WAAW,GAAGC,eAAY,CAAC,uBAAuB,CAAC,UAAU,CAAC,CAAC;QACrE,IAAM,QAAQ,GAAGA,eAAY,CAAC,iBAAiB,CAC3C,CAAC,CAAC,KAAyC,EAC3C,MAAM,CAAC,KAAyC,EAAE,OAAO,EAAE,SAAS,EAAE,GAAG,EACzE,eAAe,EAAE,KAAK,kBAAkB,WAAW,CAAC,CAAC;QACzD,OAAO,UAAU,CAAC,EAAC,CAAC,GAAA,EAAE,MAAM,QAAA,EAAE,QAAQ,UAAA,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;IACpD,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEkC,SA
AM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;ICxCD;;;;;;;;;;;;;;;;IAuBA;QAWE,iCAAY,QAAiC;YAN7C,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GACJ,0IAA0I,CAAC;YAK7I,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,OAAO,CAAC;YAEpCnC,OAAI,CAAC,MAAM,CACP,QAAQ,CAAC,UAAU,KAAK,cAAc,EACtC,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;YACzC,IAAI,CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YAClD,IAAI,CAAC,aAAa;gBACd,6BAA6B,CAAC,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,CAAC,CAAC;YACzE,IAAI,CAAC,iBAAiB;gBAClB,6BAA6B,CAAC,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,CAAC,CAAC;YAEzE,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,IAAI,CAAC,iBAAiB,CAAC,CAAC;YAE5B,IAAI,CAAC,SAAS,GAAG,sBAAoB,IAAI,CAAC,iBAAmB,CAAC;SAC/D;QAED,6CAAW,GAAX;YACE,IAAM,YAAY,GACd,sBAAsB,CAAC,IAAI,CAAC,iBAAiB,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAEvE,IAAM,YAAY,GAAG,sxBAmBmC,CAAC;YAEzD,IAAM,OAAO,GAAG,uEACZ,YAAY,6BAEJ,CAAC;YAEb,IAAM,QAAQ,GAAG,uHAGb,OAAO,k/BA4BT,YAAY,SACf,CAAC;YACA,OAAO,QAAQ,CAAC;SACjB;sCACF;KAAA;;ICtHD;;;;;;;;;;;;;;;;IAuBA;QAYE,+BAAY,QAAiC;YAX7C,kBAAa,GAAG,CAAC,IAAI,EAAE,GAAG,CAAC,CAAC;YAC5B,aAAQ,GACJ,wFAAwF,CAAC;YAK7F,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,OAAO,CAAC;YACpC,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,cAAc,GAAG,QAAQ,CAAC,UAAU,KAAK,cAAc,CAAC;YAC7D,IAAI,CAAC,SAAS,GAAG,oBAAkB,IAAI,CAAC,cAAgB,CAAC;SAC1D;QAED,2CAAW,GAAX;YACE,IAAM,MAAM,GAAG,IAAI,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC;YAC3C,IAAM,MAAM,GAAG,IAAI,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC;YAC3C,IAAM,UAAU,GAAG,IAAI,CAAC,cAAc,GAAG,CAAC,GAAG,CAAC,CAAC;YAC/C,OAAO,WACL,iCAAiC,EAAE,wJAIf,UAAU,sDAEM,MAAM,mBACxC,MAAM,koCA0BM,IAAI,CAAC,cAAc,qfAgBpC,CAAC;SACD;oCACF;KAAA;;ICpGD;;;;;;;;;;;;;;;;aAuBgB,mBAAmB,CAAC,IAInC;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,cAAE,EAAE,sBAAM,CAAW;QACrB,IAAA,6BAAU,EAAE,uBAAO,EAAE,eAAG,EAAE,6BAAU,EAAE,uCAAe,CAAU;QAEtE,IAAM,WAAW,GAAGC,eAAY,CAAC,uBAAuB,CAAC,UAAU,CAAC,CAAC;QACrE,IAAM,QAAQ,GAAGA,eAAY,CAAC,iBAAiB,CAC3C,UAAU,EAAE,MAAM,CAAC,KAAyC,EAAE,OAAO,EACrE,CAAC,kBAAkB,GAAG,EAAE,eAAe,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;QAEjE,IAAM,UAAU,GAAG;YACjB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;YACpE;gBACE,IAAI,EAAE,OAAO;gBACb,IAAI,EAAE;oBACJ,QAAQ,CAAC,YAAY,GAAG,CAAC,GAAG,QAAQ,CAAC,OAAO,CAAC,GAAG;oBAChD,QAAQ,CAAC,WAAW,GAAG,CAAC,GAAG,QAAQ,CAAC,OAAO,CAAC,IAAI;iBACjD;aACF;YACD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;YACpE;gBACE,IAAI,EAAE,OAAO;gBACb,IAAI,EAAE;oBACJ,QAAQ,CAAC,SAAS,EAAE,QAAQ,CAAC,SAAS,EAAE,QAAQ,CAAC,QAAQ;oBACzD,QAAQ,CAAC,WAAW;iBACrB;aACF;SACF,CAAC;QACF,IAAI,OAAsD,CAAC;QAC3D,IAAIF,MAAG,EAAE,CAAC,OAAO,CAAC,mCAAmC,CAAC,EAAE;;YAEtD,OAAO,GAAG,IAAI,qBAAqB,CAAC,QAAQ,CAAC,CAAC;SAC/C;aAAM;YACL,OAAO,GAAG,IAAI,uBAAuB,CAAC,QAAQ,CAAC,CAAC;YAChD,IAAM,SAAS,GAAG,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;YAC5D,IAAM,SAAS,GAAG,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;YACtC,IAAM,QAAQ,GACV,QAAQ,CAAC,YAAY,GAAG,QAAQ,CAAC,WAAW,GAAG,QAAQ,CAAC,WAAW,CAAC;YACxE,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EACnC,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EACnC,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,EAAC,CAAC,CAAC;SACzC;QACD,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,EAAE,EAAE,MAAM,CAAC,EAAE,SAAS,EAAE,UAAU,CAAC,CAAC;IAC
hF,CAAC;IAEM,IAAM,yBAAyB,GAAiB;QACrD,UAAU,EAAEqC,sBAAmB;QAC/B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,mBAAuC;KACpD;;IC7ED;;;;;;;;;;;;;;;;IAuBO,IAAM,GAAG,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,GAAG,EAAC,CAAC,CAAC;IAEvD,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IC7BD;;;;;;;;;;;;;;;;IAuBO,IAAM,IAAI,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,IAAI,EAAC,CAAC,CAAC;IAEzD,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;ICRD;QAaE,8BACI,QAAgB,EAAE,QAA0B,EAAE,QAA0B,EACxE,MAA4B;YAVhC,kBAAa,GAAG,CAAC,OAAO,EAAE,OAAO,EAAE,QAAQ,CAAC,CAAC;YAC7C,aAAQ,GAAG,2BAA2B,CAAC;YACvC,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAIrD,SAAI,GAAG,IAAI,CAAC;YAKJ,IAAA,wBAAuB,EAAtB,gBAAsB,CAAC;YAC9B,IAAI,CAAC,WAAW,GAAG,CAAC,QAAQ,EAAE,QAAQ,CAAC,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC;YAClE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,QAAQ,GAAG,MAAM,KAAK,UAAU,GAAG,CAAC,GAAG,CAAC,CAAC;YAC9C,IAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;YACrD,IAAI,CAAC,oBAAoB,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;YACpD,IAAI,CAAC,SAAS,GAAG,mBAAiB,IAAI,CAAC,QAAQ,SAC3C,IAAI,CAAC,qBAAqB,SAAI,IAAI,CAAC,oBAAsB,CAAC;SAC/D;QAED,0CAAW,GAAX;YACQ,IAAA,sFACoE,EADnE,wBAAgB,EAAE,uBACiD,CAAC;YAErE,IAAA;;;;;;;;;;qBAUD,EAVE,mBAAW,EAAE,mBAAW,EAAE,WAU5B,CAAC;YACA,IAAA;;;;;;;;;;qBAUD,EAVE,kBAAU,EAAE,kBAAU,EAAE,WAU1B,CAAC;;;;YAKN,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,8HAGV,WAAW,0CACZ,UAAU,ieAeb,WAAW,qCACZ,UAAU,8BACjB,GAAG,4CACW,gBAAgB,gIAI9B,GAAG,4CACW,eAAe,+KAKrC,IAAI,CAAC,QAAQ,gqCAuBrB,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;mCACF;KAAA;;IC3ID;;;;;;;;;;;;;;;;IAsBO,IAAM,aAAa,GAAG,UAAC,IAI7B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,oBAAK,EAAE,oBAAK,EAAE,sBAAM,CAAW;QAC/B,IAAA,yBAAQ,EAAE,qBAAM,EAAE,6CAAkB,CAAU;QAErD,IAAM,OAAO,GAAG,IAAI,oBAAoB,CACpC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,KAAyB,EAAE,QAAQ,EAAE,MAAM,CAAC,CAAC;QACvE,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,kBAAkB,CAAC,EAAC,CAAC,CAAC;QACpE,OAAO,OAAO,CAAC,gBAAgB,CAC3B,OAAO,EAAE,CAAC,KAAK,EAAE,KAAK,EAAE,MAAM,CAAC,EAAE,SAAS,EAAE,WAAW,CAAC,CAAC;IAC/D,CAAC,CAAC;IAEK,IAAM,mBAAmB,GAAiB;QAC/C,UAAU,EAAEC,gBAAa;QACzB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,aAAiC;KAC9C;;IC1CD;;;;;;;;;;;;;;;;IAqBA,IAAY,SAGX;IAHD,WAAY,SAAS;QACnB,uBAAU,CAAA;QACV,sBAAS,CAAA;IACX,CAAC,EAHW,SAAS,KAAT,SAAS,QAGpB;IAED;QAcE,oBACI,EAAa,EAAE,KAAe,EAAE,SAAkB,EAAE,OAAgB;YAVxE,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;;YAGtB,aAAQ,GAAG,cAAc,CAAC;YAC1B,SAAI,GAAG,IAAI,CAAC;YAOV,IAAM,cAAc,GAAG,GAAG,CAAC;YAC3B,IAAI,CAAC,aAAa,GAAG,CAAC,cAAc,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAC5C,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,SAAS,CAAC;YAC3B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;YACb,IAAI,CAAC,SAAS,GAAG,SAAO,IAAI,CAAC,EAAE,SAAI,IAAI,CAAC,SAAS,SAAI,IAAI,CAAC,OAAS,CAAC;SACrE;QAED,gCAAW,GAAX;YACE,IAAM,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC;YACrC,IAAM,OAAO,GAAG,IAAI,CAAC,EAAE,KAAK,SAAS,CAAC,IAAI,GAAG,KAAK,GAAG,KAAK,CAAC;YAC3D,IAAM,GAAG,GAAG,IAAI,CAAC,SAAS,GAAG,OAAO;gBACP,UAAQ,SAAS,CAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,CAAC,EAAE,CAAC,MAAG,CAAC;YAC3E,IAAM,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;YAC7D,IAAI,SAAS,GAAG,EAAE,CAAC;YACnB,IAAI,SAAS,GAAG,EAAE,CAAC;;;;YAInB,IAAI,IAAI,CAAC,SAAS,E
AAE;gBAClB,SAAS,GAAG,IAAI,CAAC,OAAO,GAAG,aAAU,MAAM,GAAG,CAAC,CAAE,GAAG,UAAU,CAAC;gBAC/D,SAAS,GAAG,IAAI,CAAC,OAAO,GAAG,SAAS,GAAG,SAAS,CAAC;aAClD;iBAAM;gBACL,SAAS,GAAG,IAAI,CAAC,OAAO,GAAG,kBAAgB,MAAQ,GAAG,aAAa,CAAC;gBACpE,SAAS,IAAI,IAAI,CAAC,OAAO,GAAG,YAAY,GAAG,YAAY,CAAC,CAAC;aAC1D;YACD,OAAO,aACH,iCAAiC,EAAE,sHAItB,aAAa,CAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,CAAC,EAAE,CAAC,8BACtC,GAAG,4EAET,SAAS,kCACD,SAAS,sBACnB,aAAa,CAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,CAAC,EAAE,CAAC,gCAClC,IAAI,CAAC,EAAE,eAAU,SAAS,CAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,CAAC,EAAE,CAAC,oFAK/D,CAAC;SACH;yBACF;KAAA,IAAA;IAED,SAAS,SAAS,CAAC,IAAY,EAAE,IAAY,EAAE,EAAa;QAC1D,IAAI,IAAI,KAAK,CAAC,EAAE;YACd,OAAO,KAAG,IAAM,CAAC;SAClB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAU,IAAI,YAAO,IAAI,OAAI,CAAC;SAC/B;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAU,IAAI,YAAO,IAAI,YAAO,IAAI,OAAI,CAAC;SAC1C;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAU,IAAI,YAAO,IAAI,YAAO,IAAI,YAAO,IAAI,OAAI,CAAC;SACrD;aAAM;YACL,MAAM,KAAK,CAAC,gBAAc,EAAE,kBAAa,IAAI,0BAAuB,CAAC,CAAC;SACvE;IACH,CAAC;IAED,SAAS,aAAa,CAAC,IAAY,EAAE,IAAY,EAAE,EAAa;QAC9D,IAAI,IAAI,KAAK,CAAC,EAAE;YACd,OAAO,KAAG,IAAM,CAAC;SAClB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAU,IAAI,OAAI,CAAC;SACpB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAU,IAAI,OAAI,CAAC;SACpB;aAAM,IAAI,IAAI,KAAK,CAAC,EAAE;YACrB,OAAU,IAAI,OAAI,CAAC;SACpB;aAAM;YACL,MAAM,KAAK,CAAC,gBAAc,EAAE,kBAAa,IAAI,0BAAuB,CAAC,CAAC;SACvE;IACH;;ICtHA;;;;;;;;;;;;;;;;aAyBgB,OAAO,CACnB,EAAa,EAAE,CAAa,EAAE,OAAsB,EAAE,IAAY,EAClE,SAAkB,EAAE,OAAgB;QACtC,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAC7B,IAAM,WAAW,GAAGtC,eAAY,CAAC,kBAAkB,CAAC,CAAC,IAAI,CAAC,EAAE,KAAK,CAAC,CAAC;QACnE,IAAI,SAAS,GAAG,CAAC,CAAC;QAClB,IAAI,WAAW,IAAI,IAAI,EAAE;YACvB,SAAS,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,WAAW,EAAC,EAAC,CAAC,CAAC;SAC3E;QACD,IAAM,YAAY,GAAGA,eAAY,CAAC,gBAAgB,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;QAEhE,IAAI,YAAY,KAAK,KAAK,GAAG,CAAC,EAAE;YAC9B,MAAM,IAAI,KAAK,CACX,uDACI,CAAC,CAAC,KAAK,CAAC,MAAM,GAAG,CAAC,OAAG;iBACzB,kBAAgB,IAAM,CAAA,CAAC,CAAC;SAC7B;QACD,IAAM,IAAI,GAAG,SAAS,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC;QAC3C,IAAI,MAAM,GAAG,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,SAAS,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;;;;;QAMzD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,EAAE;YACxD,IAAM,OAAO,GAAG,IAAI,UAAU,CAAC,EAAE,EAAE,SAAS,CAAC,KAAK,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;YACpE,IAAM,UAAU,GAAG,MAAM,CAAC;YAC1B,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC,CAAC;YACnD,MAAM;gBACF,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;YAC3E,OAAO,CAAC,WAAW,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;SACxC;;;QAGD,IAAI,SAAS,EAAE;YACb,IAAM,OAAO,GAAG,IAAI,UAAU,CAAC,EAAE,EAAE,SAAS,CAAC,KAAK,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;YACxE,IAAM,UAAU,GAAG,MAAM,CAAC;YAC1B,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC,CAAC;YACnD,MAAM;gBACF,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;YAC3E,OAAO,CAAC,WAAW,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;SACxC;QAED,IAAI,WAAW,IAAI,IAAI,EAAE;YACvB,IAAM,kBAAkB,GAAGA,eAAY,CAAC,sBAAsB,CAAC,WAAW,CAAC,CAAC;YAC5E,IAAM,uBAAuB,GAAG,SAAS,CACrC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,kBAAkB,EAAC,EAAC,CAAC,CAAC;YAEvE,OAAO,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YACnC,OAAO,CAAC,WAAW,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC;YAEtC,OAAO,uBAAuB,CAAC;SAChC;QAED,OAAO,MAAM,CAAC;IAChB;;IChFA;;;;;;;;;;;;;;;;aAuBgB,OAAO,CACnB,IAA0E;QAErE,IAAA,oBAAM,EAAE,s
BAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,EAAE,2BAAS,EAAE,uBAAO,CAAU;QACzC,OAAO,OAAO,CAAC,SAAS,CAAC,IAAI,EAAE,CAAC,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;IACvE,CAAC;IAEM,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEuC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;ICpCD;;;;;;;;;;;;;;;;aAuBgB,MAAM,CAClB,IAAwE;QAEnE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,EAAE,2BAAS,EAAE,uBAAO,CAAU;QACzC,OAAO,OAAO,CAAC,SAAS,CAAC,GAAG,EAAE,CAAC,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;IACtE,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEC,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;ICpCD;;;;;;;;;;;;;;;;IAqBA;QAWE,6BAAY,WAAqB,EAAE,UAAyB;YAV5D,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YAMtB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YACZ,aAAQ,GAAG,kBAAkB,CAAC;YAG5B,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,kBAAgB,UAAY,CAAC;YAC9C,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;SAC9B;QAED,yCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,mJAIvB,IAAI,CAAC,oBAAoB,EAAE,6BAC3B,IAAI,CAAC,mBAAmB,EAAE,6BAC1B,IAAI,CAAC,mBAAmB,EAAE,gSAOhC,IAAI,CAAC,kBAAkB,EAAE,qEAGjB,IAAI,CAAC,sBAAsB,EAAE,mEAG3C,CAAC;YACL,OAAO,QAAQ,CAAC;SACjB;QAEO,kDAAoB,GAApB;YACN,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBAC9B,OAAO,WAAW,CAAC;aACpB;iBAAM;gBACL,OAAO,WAAW,CAAC;aACpB;SACF;QAEO,iDAAmB,GAAnB;YACN,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBAC9B,OAAO,WAAW,CAAC;aACpB;iBAAM;gBACL,OAAO,WAAW,CAAC;aACpB;SACF;QAEO,iDAAmB,GAAnB;YACN,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBAC9B,OAAO,WAAW,CAAC;aACpB;iBAAM;gBACL,OAAO,WAAW,CAAC;aACpB;SACF;QAEO,gDAAkB,GAAlB;YACN,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBAC9B,OAAO,sBAAsB,CAAC;aAC/B;iBAAM;gBACL,OAAO,sBAAsB,CAAC;aAC/B;SACF;QAEO,oDAAsB,GAAtB;YACN,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBAC9B,OAAO,2BAA2B,CAAC;aACpC;iBAAM;gBACL,OAAO,2BAA2B,CAAC;aACpC;SACF;kCACF;KAAA;;ICzGD;;;;;;;;;;;;;;;;aAsBgB,YAAY,CAAC,IAI5B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,2BAAS,EAAE,6BAAU,CAAU;QAEtC,IAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QAC7B,IAAM,WAAW,GAAG,CAAC,UAAU,KAAK,MAAM,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACtE,IAAM,UAAU,GAAG,CAAC,UAAU,KAAK,MAAM,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACrE,IAAM,UAAU,GAAG,CAAC,UAAU,KAAK,MAAM,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QAErE,IAAM,YAAY,GAAG,WAAW,GAAG,SAAS,CAAC;QAC7C,IAAM,WAAW,GAAG,UAAU,GAAG,SAAS,CAAC;QAC3C,IAAM,WAAW,GAAG,UAAU,IAAI,SAAS,GAAG,SAAS,CAAC,CAAC;QAEzD,IAAM,WAAW,GAAG,CAAC,UAAU,KAAK,MAAM;YACtC,CAAC,SAAS,EAAE,YAAY,EAAE,WAAW,EAAE,WAAW,CAAC;YACnD,CAAC,SAAS,EAAE,WAAW,EAAE,YAAY,EAAE,WAAW,CAAC,CAAC;QAExD,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;SACnC,CAAC;QAEF,IAAM,OAAO,GAAG,IAAI,mBAAmB,CAAC,WAAW,EAAE,UAAU,CAAC,CAAC;QACjE,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;IACtE,CAAC;IAEM,IAAM,kBAAkB,GAAiB;QAC9C,UAAU,EAAEC,eAAY;QACxB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,YAAgC;KAC7C;;ICxDD;;;;;;;;;;;;;;;;IAwBA;QAeE,mCACI,QAAiC,EAAE,OAAe,EAClD,UAA0C,EAAE,kBAA0B;YADnC,wBAAA,EAAA,eAAe;YAClD,2BAAA,EAAA,iBAA0C;YAAE,mCAAA,EAAA,0BAA0B;YAZ1E,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GACJ,gFAAgF,CAAC;YACrF,kBAAa,GAA6B,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAKpD,WAAM,GAAG,IAAI,CAAC;YAKZ,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YACrC,IAAI,
CAAC,cAAc,GAAG,EAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,EAAC,CAAC;YAClD,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAE1E1C,OAAI,CAAC,MAAM,CACP,QAAQ,CAAC,UAAU,KAAK,cAAc,EACtC,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;YAEzC,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YACD,IAAI,kBAAkB,EAAE;gBACtB,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;YAE7C,IAAI,CAAC,SAAS,GAAG,kBAAgB,UAAY,CAAC;SAC/C;QAED,+CAAW,GAAX;YACE,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GACd,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC;gBAC/D,IAAI,IAAI,CAAC,kBAAkB,EAAE;oBAC3B,iBAAiB;wBACb,wJAEA,YAAY,gBACd,CAAC;iBACJ;qBAAM;oBACL,iBAAiB,GAAG,8FAEd,YAAY,4BAEjB,CAAC;iBACH;gBAED,sBAAsB,GAAG,8CAA8C,CAAC;aACzE;YAED,IAAM,cAAc,GAAG,IAAI,CAAC,OAAO;gBAC/B,0DAA0D;gBAC1D,EAAE,CAAC;YAEP,IAAM,QAAQ,GAAG,aACb,iBAAiB,kBAEjB,sBAAsB,EAAE,unEAwDlB,cAAc,sBACd,sBAAsB,oIAK/B,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;wCACF;KAAA;;IChKD;;;;;;;;;;;;;;;;IAwBA;QAgBE,gCACI,QAAiC,EAAE,OAAe,EAClD,UAA0C,EAAE,kBAA0B;YADnC,wBAAA,EAAA,eAAe;YAClD,2BAAA,EAAA,iBAA0C;YAAE,mCAAA,EAAA,0BAA0B;YAb1E,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;YAC3B,aAAQ,GAAG,uJAEW,CAAC;;YAEvB,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YASpD,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC;YACrC,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/DA,OAAI,CAAC,MAAM,CACP,QAAQ,CAAC,UAAU,KAAK,cAAc,EACtC,cAAM,OAAA,6BAA6B,GAAA,CAAC,CAAC;YAEzC,IAAI,OAAO,EAAE;gBACX,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aACjC;YACD,IAAI,kBAAkB,EAAE;gBACtB,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,wBAAwB,CAAC,CAAC;aACnD;YAED,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;YAC7C,IAAI,CAAC,SAAS,GAAG,eAAa,IAAI,CAAC,UAAY,CAAC;SACjD;QAED,4CAAW,GAAX;YACE,IAAI,iBAAiB,GAAG,EAAE,EAAE,sBAAsB,GAAG,EAAE,CAAC;YACxD,IAAI,IAAI,CAAC,UAAU,EAAE;gBACnB,IAAM,YAAY,GAAG,4BAA4B,CAAC,IAAI,CAAC,UAAU,EAAE,KAAK,CAAC,CAAC;gBAC1E,IAAI,IAAI,CAAC,kBAAkB,EAAE;oBAC3B,iBAAiB;wBACb,4IAEA,YAAY,gBACd,CAAC;iBACJ;qBAAM;oBACL,iBAAiB,GAAG,oFAEd,YAAY,4BAEjB,CAAC;iBACH;gBAED,sBAAsB,GAAG,wCAAwC,CAAC;aACnE;YAED,IAAM,cAAc,GAAG,IAAI,CAAC,OAAO;gBAC/B,oDAAoD;gBACpD,EAAE,CAAC;YAEP,IAAM,QAAQ,GAAG,aACb,iBAAiB,qTAUjB,mBAAmB,EAAE,wwEA0DnB,cAAc,kBACd,sBAAsB,oFAG3B,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;qCACF;KAAA;;ICvKD;;;;;;;;;;;;;;;;aAuBgB,qBAAqB,CAAC,IAIrC;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,EAAE,sBAAM,CAAW;QACpB,IAAA,uBAAO,EAAE,eAAG,EAAE,2BAAS,EAAE,uCAAe,CAAU;QAEzD,IAAI,UAAU,GAAG,SAAS,CAAC;QAC3B,IAAI,UAAU,IAAI,IAAI,EAAE;YACtB,UAAU,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;SACrB;QAED,IAAM,QAAQ,GAAGC,eAAY,CAAC,iBAAiB,CAC3C,CAAC,CAAC,KAAyC,EAC3C,MAAM,CAAC,KAAyC,EAAE,OAAO,EAAE,UAAU,EACrE,GAAG,EAAE,eAAe,EAAE,IAAI,iBAAiB,CAAC;QAEhD,IAAM,UAAU,GAAG;YACjB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,EAAE,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,EAAC;YACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;YACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,cAAc,EAAE,QAAQ,CAAC,aAAa,CAAC,EAAC;YACxE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,QAAQ,EAAE,Q
AAQ,CAAC,OAAO,CAAC,EAAC;SAC7D,CAAC;QAEF,IAAI,OAAyD,CAAC;;;QAG9D,IAAI,QAAQ,CAAC,SAAS,KAAK,CAAC,IAAI,QAAQ,CAAC,QAAQ,KAAK,QAAQ,CAAC,SAAS;YACpE,QAAQ,CAAC,OAAO,KAAK,QAAQ,CAAC,QAAQ,IAAI,QAAQ,CAAC,YAAY,KAAK,CAAC;YACrE,QAAQ,CAAC,WAAW,KAAK,CAAC;YAC1B,QAAQ,CAAC,YAAY,KAAK,QAAQ,CAAC,WAAW;YAC9C,QAAQ,CAAC,UAAU,KAAK,QAAQ,CAAC,WAAW;YAC5C,QAAQ,CAAC,cAAc,KAAK,CAAC,IAAI,QAAQ,CAAC,aAAa,KAAK,CAAC;YAC7D,QAAQ,CAAC,YAAY,KAAK,CAAC,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,KAAK,CAAC,EAAE;YAChE,OAAO,GAAG,IAAI,yBAAyB,CAAC,QAAQ,CAAC,CAAC;SACnD;aAAM;YACL,OAAO,GAAG,IAAI,sBAAsB,CAAC,QAAQ,CAAC,CAAC;YAC/C,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,CAAC,EAAC,EAC9C,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAC,EAC7C,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,WAAW,GAAG,QAAQ,CAAC,UAAU,CAAC,EAAC,CAAC,CAAC;SAC1E;QAED,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,MAAM,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;IAC7E,CAAC;IAEM,IAAM,2BAA2B,GAAiB;QACvD,UAAU,EAAE0C,wBAAqB;QACjC,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,qBAAyC;KACtD;;IC3ED;;;;;;;;;;;;;;;;IAsBO,IAAM,kBAAkB,GAAG,gBAAgB,CAAC;QACjD,SAAS,EAAE,YAAY,CAAC,GAAG;QAC3B,aAAa,EAAEC,eAAW;QAC1B,eAAe,EAAE,IAAI;KACtB,CAAC,CAAC;IAEI,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAEC,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,kBAAkB;KAC/B;;ICTD;QAYE,uBACI,UAAmC,EACnC,UAA2C;YAT/C,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,mBAAmB,CAAC;YAG/B,SAAI,GAAG,IAAI,CAAC;YAKV,IAAI,CAAC,UAAU,GAAG,CAAC,UAAU,CAAC,SAAS,EAAE,UAAU,CAAC,MAAM,CAAC,CAAC;YACtD,IAAA,+EAC0D,EADzD,mBACyD,CAAC;YACjE,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC,MAAM,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,WAAW,CAAC;YAEhE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;;;YAG3D,IAAI,CAAC,QAAQ;gBACT,eAAe,CAAC,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEtE,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,SAAS,GAAG,YAAU,UAAY,CAAC;SACzC;QAED,mCAAW,GAAX;YACE,IAAI,QAAQ,GAAG,EAAE,CAAC;YAClB,IAAI,SAAS,GAAG,KAAK,CAAC;YACtB,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE;gBAC1D,QAAQ,GAAG,kIAIP,IAAI,CAAC,UAAU,KAAK,KAAK,GAAG,GAAG,GAAG,GAAG,yDACR,CAAC;gBAClC,SAAS,GAAG,gBAAgB,CAAC;aAC9B;iBAAM,IAAI,IAAI,CAAC,UAAU,KAAK,KAAK,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBAClE,QAAQ,GAAG,sCAAsC,CAAC;aACnD;iBAAM,IAAI,IAAI,CAAC,UAAU,KAAK,MAAM,EAAE;gBACrC,QAAQ,GAAG,sCAAsC,CAAC;gBAClD,SAAS,GAAG,KAAK,CAAC;aACnB;YAED,IAAM,aAAa,GAAG,IAAI,CAAC,UAAU,KAAK,MAAM;;gBAE5C,sEAAsE;gBACtE,2CAA2C,CAAC;YAEhD,IAAM,mBAAmB,GAAG,wDACmB,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,gBAChE,CAAC;YAEL,IAAM,QAAQ,GAAG,8GAKZ,mBAAmB,gJAIpB,IAAI,CAAC,WAAW,CAAC,MAAM,KAAK,CAAC;gBACzB,cAAc;gBACd,iBAAiB,6EAGpB,iCAAiC,EAAE,2IAGjB,SAAS,sTAMvB,QAAQ,+bAWP,QAAQ,oOAQV,aAAa,iCAGnB,CAAC;YACH,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA;;aCpGe,MAAM,CAClB,CAAa,EAAE,IAAqB,EAAE,QAAiB,EACvD,UAAuB,EAAE,OAAsB;QACjD,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAC7B,IAAM,SAAS,GAAG,EAAE,CAAC;QAErB,IAAM,QAAQ,GAAG7C,OAAI,CAAC,cAAc,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;QACpD,IAAI,IAAI,GAAG,QAAQ,CAAC;QACpB,IAAM,YAAY,GAAGC,eAAY,CAAC,kBAAkB,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC;QAElE,IAAI,KAAK,GAAG,CAAC,CAAC;QACd,IAAI,YAAY,IAAI,IAAI,EAAE;YACxB,KAAK,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,YAAY,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACvE,IAAI,GAAGA,eAAY,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;YACzD,SAAS,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;SACvB;QAEDA,eAAY,CAAC,0BAA0B,CAAC,UAAU,EAAE,IAAI,EAAE,KAAK,CAAC,CAAC;QAE3D,IAAA,4EACuD,EADtD,sBAAc,EAAE,mBACsC,CAAC;QAC9D,IAAI,WAAW,GAAG,cAAc,CAAC;
QACjC,IAAI,QAAQ,EAAE;;YAEZ,WAAW,GAAGA,eAAY,CAAC,oBAAoB,CAAC,cAAc,EAAE,QAAQ,CAAC,CAAC;SAC3E;QAED,IAAI,GAAG,CAAC;QACR,IAAI,CAAC,UAAU,KAAK,KAAK,IAAI,UAAU,KAAK,MAAM;YAC9C,OAAO,CAAC,kBAAkB,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE;YACvC,IAAM,KAAK,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,MAAoB,CAAC;YACvE,QAAQ,UAAU;gBAChB,KAAK,KAAK;oBACR,IAAM,SAAS,GAAG,UAAU,CACxB,KAAK,EAAED,OAAI,CAAC,aAAa,CAAC,WAAW,CAAC,EAAE,WAAW,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;oBAClE,GAAG,GAAG,OAAO,CAAC,cAAc,CAAC,WAAW,EAAE,CAAC,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;oBAC9D,MAAM;gBACR,KAAK,MAAM;oBACH,IAAA,uDACgD,EAD/C,oBAAO,EAAE,sBAAQ,EAAE,sBAC4B,CAAC;oBACvD,GAAG,GAAG,OAAO,CAAC,cAAc,CAAC,QAAQ,EAAE,QAAQ,EAAE,OAAO,CAAC,CAAC;oBAC1D,MAAM;gBACR;oBACE,MAAM,IAAI,KAAK,CACR,UAAU,8CAA2C,CAAC,CAAC;aACjE;SACF;aAAM;YACL,IAAM,MAAM,GAAGA,OAAI,CAAC,aAAa,CAAC,WAAW,CAAC,CAAC;YAC/C,IAAM,KAAK,GAAGA,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;YAC9C,IAAM,SAAS,GAAG,KAAK,GAAG,MAAM,CAAC;YAEjC,IAAM,UAAU,GAAG,EAAC,UAAU,EAAE,MAAM,EAAE,MAAM,QAAA,EAAE,SAAS,WAAA,EAAE,OAAO,EAAE,CAAC,EAAC,CAAC;YACvE,IAAM,KAAK,GAAG,UAAU,KAAK,MAAM,GAAG,SAAS,GAAG8C,aAAU,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC;YACtE,IAAM,WAAW,GAAG;gBAClB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,MAAM,CAAC,EAAC;aAChC,CAAC;YACF,IAAM,OAAO,GAAG,IAAI,aAAa,CAAC,UAAU,EAAE,UAAU,CAAC,CAAC;YAC1D,IAAM,OAAO,GACT,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,KAAK,CAAC,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;YACnE,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YAExB,GAAG,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,WAAW,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;SAC7E;QAED,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QAEtD,OAAO,GAAG,CAAC;IACb;;IC/FA;;;;;;;;;;;;;;;;aAsBgB,GAAG,CACf,IAAkE;QAE7D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,EAAE,yBAAQ,CAAU;QAE/B,OAAO,MAAM,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;IACnD,CAAC;IAEM,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAuB;KACpC;;aCVe,MAAM,CAClB,IAAwE;;QAEnE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,yBAAQ,CAAU;QACzB,IAAM,OAAO,GAAG,MAAkB,CAAC;QAE7B,IAAA,mEACyD,EADxD,oBAAO,EAAE,0BAAU,EAAE,kBACmC,CAAC;QAChE9C,eAAY,CAAC,mBAAmB,CAAC,OAAO,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC;QAC5D,IAAA,6DAAqE,EAApE,cAAI,EAAE,gBAA8D,CAAC;QAE5E,IAAM,MAAM,GAAG,KAAK,CAAC,MAAM,CAAC;QAC5B,IAAI,GAAG,GAAoB,IAAI,CAAC;QAChC,IAAI,gBAAgB,GAAG,OAAO,CAAC,MAAM,CAAC;QACtC,IAAM,gBAAgB,GAAiB,EAAE,CAAC;QAC1C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,EAAE,EAAE,CAAC,EAAE;;gBAC/B,KAAqB,IAAA,oBAAAG,SAAA,KAAK,CAAC,CAAC,CAAC,CAAA,CAAA,gBAAA,4BAAE;oBAA1B,IAAM,MAAM,WAAA;oBACT,IAAA,2EACiE,EADhE,4BAAwB,EAAE,4BACsC,CAAC;oBACxE,IAAI,CAAa,SAAA,CAAC;oBAClB,IAAIH,eAAY,CAAC,qBAAqB,CAAC,IAAI,CAAC,EAAE;wBAC5C,CAAC,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC;qBACrB;yBAAM;wBACL,CAAC,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,MAAA,EAAC,EAAC,CAAC,CAAC;wBACtE,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;qBAC1B;oBACD,IAAM,WAAW,GAAa,CAAC,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;oBAC9C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,YAAY,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;wBAC5C,WAAW,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;qBAC3C;oBAED,IAAI,CAACD,OAAI,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,EAAE;wBAC3C,CAAC,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,WAAW,EAAC,EAAC,CAAC,CAAC;wBACjE,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;qBAC1B;oBACD,IAAI,GAAG,KAAK,IAAI,EAAE;wBAChB,GAAG,GAAG,CAAC,CAAC;qBACT;yBAAM;;wBAEL,GAAG
;4BACC,kBAAkB,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAC,CAAe,CAAC;wBACxE,gBAAgB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;qBAC5B;iBACF;;;;;;;;;YACD,IAAI,CAAC,GAAG,MAAM,GAAG,CAAC,EAAE;gBAClB,IAAI,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE;oBAChB,GAAG,GAAG,GAAG,CAAC;wBACR,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC;wBAChB,OAAO,SAAA;wBACP,KAAK,EAAE;4BACL,IAAI,EAAE,IAAI,CAAC,CAAC,CAAC,IAAI,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC;4BACnD,QAAQ,EAAE,KAAK;yBAChB;qBACF,CAAC,CAAC;oBACH,gBAAgB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;iBAC5B;gBACD,gBAAgB,EAAE,CAAC;aACpB;SACF;;;YAGD,KAAyB,IAAA,qBAAAI,SAAA,gBAAgB,CAAA,kDAAA,gFAAE;gBAAtC,IAAM,UAAU,6BAAA;gBACnB,IAAI,UAAU,KAAK,GAAG,EAAE;oBACtB,SAAS;iBACV;gBACD,OAAO,CAAC,WAAW,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;aACxC;;;;;;;;;QAED,OAAO,GAAG,CAAC;IACb,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAE4C,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;ICtGD;;;;;;;;;;;;;;;;IAqBO,IAAM,GAAG,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,GAAG,EAAC,CAAC,CAAC;IAEvD,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IC3BD;;;;;;;;;;;;;;;;IAwBO,IAAM,KAAK,GAAG,gBAAgB,CACjC,EAAC,SAAS,EAAE,YAAY,CAAC,KAAK,EAAE,KAAK,EAAE,MAAM,EAAE,aAAa,EAAEC,YAAQ,EAAC,CAAC,CAAC;IAEtE,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAK;KAClB;;IC/BD;;;;;;;;;;;;;;;;IAsBO,IAAM,GAAG,GAAG,eAAe,CAAC;QACjC,MAAM,EAAE,WAAW,CAAC,GAAG;QACvB,aAAa,EAAE,UAAU;QACzB,KAAK,EAAE,SAAS;KACjB,CAAC,CAAC;IAEI,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IChCD;;;;;;;;;;;;;;;;aAsBgB,UAAU,CAAC,IAI1B;QACQ,IAAA,oBAAM,EAAE,kBAAK,EAAE,sBAAO,CAAS;QAC/B,IAAA,eAAG,CAAU;QACb,IAAA,oBAAK,CAAW;QAEvB,IAAM,SAAS,GAAG,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC;QACrC,IAAM,QAAQ,GAAG,KAAK,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;QACrC,IAAI,IAAI,GAAG,GAAG,CAAC;QACf,IAAI,GAAG,GAAG,CAAC,EAAE;;YAEXpD,OAAI,CAAC,MAAM,CACP,EAAE,SAAS,GAAG,CAAC,CAAC,IAAI,GAAG,EACvB,cAAM,OAAA,mCAAiC,EAAG,SAAS,GAAG,CAAC,CAAC,UACpD,SAAS,MAAG,GAAA,CAAC,CAAC;YACtB,IAAI,GAAG,SAAS,GAAG,GAAG,GAAG,CAAC,CAAC;SAC5B;QACD,QAAQ,CAAC,MAAM,CAAC,IAAI,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;QAE5B,OAAO,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,KAAK,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;IAC1E,CAAC;IAEM,IAAM,gBAAgB,GAAiB;QAC5C,UAAU,EAAEqD,aAAU;QACtB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAA8B;KAC3C;;ICnDD;;;;;;;;;;;;;;;;IAsBO,IAAM,KAAK,GACd,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,KAAK,EAAE,aAAa,EAAE,YAAY,EAAC,CAAC,CAAC;IAEvE,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAK;KAClB;;IC7BD;;;;;;;;;;;;;;;;IAqBA;QAUE,qBAAY,KAAe;YAT3B,kBAAa,GAAa,EAAE,CAAC;YAC7B,gBAAW,GAAa,EAAE,CAAC;YAI3B,aAAQ,GAAG,cAAc,CAAC;YAC1B,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC;SACzB;QAED,iCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,WACf,iCAAiC,EAAE,+GAKtC,CAAC;YACA,OAAO,QAAQ,CAAC;SACjB;0BACF;KAAA;;IClDD;;;;;;;;;;;;;;;;aAsBgB,IAAI,CAAC,IAAgD;QAE5D,IAAA,sBAAO,EAAE,kBAAK,CAAS;QACvB,IAAA,mBAAK,EAAE,mBAAK,CAAU;QACxB,IAAA,mBAAK,CAAU;QAEpB,KAAK,GAAG,KAAK,IAAItD,OAAI,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;QAExC,IAAI,KAAK,KAAK,QAAQ,EAAE;;YAEtB,IAAM,MAAM,GAAGA,OAAI,CAAC,iBAAiB,CAAC,KAAK,EAAEA,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC,CAAC;YACxE,MAAM,CAAC,IAAI,CAAC,KAAe,CAAC,CAAC;YAC7B,OAAO,OAAO,CAAC,cAAc,CAAC,KAAK,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC;SACrD;aAAM;YACL,IAAM,OAAO,GAAG,IAAI,WAAW
,CAAC,KAAK,CAAC,CAAC;YACvC,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,KAAe,CAAC,EAAC,CAAC,CAAC;YACjE,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,EAAE,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;SAClE;IACH,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEuD,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;IC9CD;;;;;;;;;;;;;;;;IAsBA;QASE,8BAAY,UAA4C;YARxD,gBAAW,GAAa,EAAE,CAAC;YAI3B,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,UAAU,CAAC;YAC9B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,eAAe,CAAC;SAClC;QAED,0CAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,iTAQtC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;mCACF;KAAA;;ICpDD;;;;;;;;;;;;;;;;IAuBO,IAAM,mBAAmB,GAAiB;QAC7C,UAAU,EAAEC,gBAAa;QACzB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAC,EAAiB;gBAAhB,kBAAM,EAAE,oBAAO;YACpB,IAAA,oBAAK,CAAkC;YAC9C,IAAM,aAAa,GAAG,OAAwB,CAAC;YAE/C,IAAM,OAAO,GAAG,IAAI,oBAAoB,CAAE,KAAkB,CAAC,KAAK,CAAC,CAAC;YACpE,IAAM,MAAM,GACR,aAAa,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,KAAK,CAAC,EAAE,KAAK,CAAC,KAAK,CAAC,CAAC;YAClE,OAAO,MAAM,CAAC;SACjB;KACF;;IClCD;;;;;;;;;;;;;;;;IAsBO,IAAM,KAAK,GACd,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,KAAK,EAAE,aAAa,EAAE,YAAY,EAAC,CAAC,CAAC;IAEvE,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAK;KAClB;;IC9BD;;;;;;;;;;;;;;;;IAqBO,IAAM,QAAQ,GACjB,gBAAgB,CAAC,EAAC,SAAS,EAAE,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,OAAO,EAAC,CAAC,CAAC;IAEjE,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAEC,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,QAAQ;KACrB;;IC5BD;;;;;;;;;;;;;;;;IAqBA;QAYE,2BAAY,WAAqB,EAAE,SAAiB;YAAjB,0BAAA,EAAA,iBAAiB;YAXpD,gBAAW,GAAa,CAAC,CAAC,CAAC,CAAC;YAI5B,kBAAa,GAAa,EAAE,CAAC;YAE7B,kBAAa,GACT,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAKd,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,SAAS,CAAC;YAC3B,IAAI,CAAC,SAAS,GAAG,gBAAc,IAAI,CAAC,SAAW,CAAC;SACjD;QAED,uCAAW,GAAX;YACE,IAAM,WAAW,GAAG,IAAI,CAAC,SAAS;gBAC9B,yCAAyC;gBACzC,2CAA2C,CAAC;YAChD,IAAM,WAAW,GAAG,IAAI,CAAC,SAAS,GAAG,kBAAkB,GAAG,iBAAiB,CAAC;YAC5E,OAAO,4CAC4B,WAAW,mBAE1C,iCAAiC,EAAE,qTAMhB,WAAW,2GAKnC,CAAC;SACD;gCACF;KAAA;;ICtCM,IAAM,gBAAgB,GAAiB;QAC5C,UAAU,EAAEC,aAAU;QACtB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAA8B;KAC3C,CAAC;IAEF,IAAI,mBAA6C,CAAC;aAElC,UAAU,CAAC,IAI1B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QACjC,IAAA,sBAAM,CAAW;QACf,IAAA,+BAAW,CAAU;QAE5B,IAAI,MAAM,IAAI,IAAI,EAAE;YAClB,MAAM,IAAI,KAAK,CAAC,0DAA0D,CAAC,CAAC;SAC7E;QAED,IAAM,OAAO,GAAG,QAAQ,gBAAgB,CAAC,KAAK,WAAW;YACrD,MAAM,YAAY,gBAAgB,CAAC;QACvC,IAAM,OAAO,GAAG,QAAQ,gBAAgB,CAAC,KAAK,WAAW;YACrD,MAAM,YAAY,gBAAgB,CAAC;QACvC,IAAM,QAAQ,GAAG,CAAC,QAAQ,iBAAiB,CAAC,KAAK,WAAW;YAC1C,MAAM,YAAY,iBAAiB;aAChD,QAAQ,eAAe,CAAC,KAAK,WAAW;gBACxC,MAAM,YAAY,eAAe,CAAC,CAAC;QACxC,IAAM,aAAa,GACf,QAAQ,WAAW,CAAC,KAAK,WAAW,IAAI,MAAM,YAAY,WAAW,CAAC;QAEpE,IAAA;;;;;6CAK2B,EAL1B,aAAK,EAAE,cAKmB,CAAC;QAClC,IAAM,QAAQ,GAAG,CAAC,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;QAE9C,IAAI5D,MAAG,EAAE,CAAC,OAAO,CAAC,mBAAmB,CAAC,EAAE;YACtC,IAAI,OAAO,EAAE;gBACX,OAAO,uBAAuB,CAAC;oBAC7B,aAAa,EAAE,MAA0B;oBACzC,OAAO,SAAA;oBACP,KAAK,OAAA;oBACL,QAAQ,UAAA;oBACR,SAAS,EAAE,IAAI;iBAChB,CAAC,CAAC;aACJ;SACF;QAED,IAAI,OAAO,IAAI,OAAO,EAAE;YACtB,IAAI,mBAAmB,IAAI,IAAI,EAAE;gBAC/B,mBAAmB,GAAG,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;aACzE;YACD,mBAAmB,CAAC,MAAM,CAAC,KAAK
,GAAG,KAAK,CAAC;YACzC,mBAAmB,CAAC,MAAM,CAAC,MAAM,GAAG,MAAM,CAAC;YAC3C,mBAAmB,CAAC,SAAS,CACzB,MAA6C,EAAE,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC;YACxE,MAAM,GAAG,mBAAmB,CAAC,MAAM,CAAC;SACrC;QAED,IAAI,aAAa,IAAI,QAAQ,IAAI,OAAO,IAAI,OAAO,EAAE;YACnD,OAAO,uBAAuB,CAAC;gBAC7B,aAAa,EAAE,MAAyC;gBACxD,OAAO,SAAA;gBACP,KAAK,OAAA;gBACL,QAAQ,UAAA;gBACR,SAAS,EAAE,KAAK;aACjB,CAAC,CAAC;SACJ;;;QAID,IAAM,SAAS,GAAI,MAA6C,CAAC,IAAI,CAAC;QACtE,IAAI,UAAU,GAAG,SAAS,CAAC;QAC3B,IAAI,WAAW,IAAI,IAAI,IAAI,WAAW,KAAK,CAAC,EAAE;YAC5C,UAAU,GAAG,IAAI,UAAU,CAAC,MAAM,CAAC,KAAK,GAAG,MAAM,CAAC,MAAM,GAAG,WAAW,CAAC,CAAC;YAExE,IAAM,UAAU,GAAG,SAAS,CAAC,MAAM,CAAC;YACpC,IAAI,CAAC,GAAG,CAAC,CAAC;YACV,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,CAAC,EAAE,EAAE;gBACnC,IAAI,CAAC,GAAG,CAAC,GAAG,WAAW,EAAE;oBACvB,UAAU,CAAC,CAAC,EAAE,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC,CAAC;iBAChC;aACF;SACF;QAED,IAAM,MAAM,GAAG,OAAO,CAAC,cAAc,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;QAEzD,IAAM,IAAI,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAClD,IAAI,CAAC,MAAM,GAAG,IAAI,UAAU,CAAC,UAAU,CAAC,CAAC;QACzC,OAAO,CAAC,kBAAkB,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAE1C,OAAO,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QACnC,OAAO,MAAM,CAAC;IAChB,CAAC;IAED,SAAS,uBAAuB,CAAC,IAMhC;QACQ,IAAA,kCAAa,EAAE,sBAAO,EAAE,kBAAK,EAAE,wBAAQ,EAAE,0BAAS,CAAS;QAC3D,IAAA,+BAAW,CAAU;QAE5B,IAAM,IAAI,GAAGC,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;QAC1C,IAAM,OAAO,GAAGA,OAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC;QAC9C,IAAM,OAAO,GAAG,IAAI,iBAAiB,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC;QAE3D,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,IAAI,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,WAAW,CAAC,EAAC;YACrE,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,WAAM,OAAO,CAAC,EAAC;YACpC,EAAC,IAAI,EAAE,QAAQ,EAAE,IAAI,WAAM,OAAO,CAAC,QAAQ,CAAC,EAAC;SAC9C,CAAC;QAEF,IAAM,MAAM,GAAG,OAAO,CAAC,oBAAoB,CACvC,OAAO,EAAE,QAAQ,EAAE,WAAW,EAAE,SAAS,EAAE,aAAa,CAAC,CAAC;QAC9D,OAAO,MAAM,CAAC;IAChB;;ICpJA;;;;;;;;;;;;;;;;IAuBA;QAcE,0BACI,MAAgB,EAAE,SAAmB,EAAE,aAAuB,EAC9D,WAA0B,EAAE,UAAyB;YAVzD,aAAQ,GAAG,wBAAwB,CAAC;;YAEpC,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAItD,SAAI,GAAG,IAAI,CAAC;YAKV,IAAI,CAAC,aAAa,GAAG,CAAC,GAAG,EAAE,MAAM,EAAE,UAAU,CAAC,CAAC;YAC/CC,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,SAAS,CAAC,CAAC;YAC3DA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,WAAW,GAAG,MAAM,CAAC;YAC1B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,WAAW,IAAI,IAAI,EAAE;gBACvBA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;gBAC7D,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;aACnC;YACD,IAAI,UAAU,IAAI,IAAI,EAAE;gBACtBA,eAAY,CAAC,0BAA0B,CAAC,MAAM,EAAE,UAAU,CAAC,CAAC;gBAC5D,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;aAClC;YACD,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;YAC7B,IAAI,CAAC,SAAS,GAAG,WAAW,CAAC;SAC9B;QAED,sCAAW,GAAX;YACE,IAAI,aAAa,GAAG,KAAK,CAAC;YAC1B,IAAI,IAAI,CAAC,WAAW,IAAI,IAAI,EAAE;gBAC5B,aAAa,GAAG,+BAA+B,CAAC;aACjD;YAED,IAAI,YAAY,GAAG,KAAK,CAAC;YACzB,IAAI,IAAI,CAAC,UAAU,IAAI,IAAI,EAAE;gBAC3B,YAAY,GAAG,8BAA8B,CAAC;aAC/C;YAED,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,6PAMb,aAAa,sCACd,YAAY,wOAKtC,CAAC;YACA,OAAO,QAAQ,CAAC;SACjB;+BACF;KAAA;;ICxFD;;;;;;;;;;;;;;;;IAuBO,IAAM,oBAAoB,GAAiB;QAChD,UAAU,EAAE2D,iBAAc;QAC1B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAC,EAAwB;gBAAvB,kBAAM,EAAE,gBAAK,EAAE,oBAAO;YAC3B,IAAA,YAAC,EAAE,oBAAK,EAAE,sBAAM,EAAE,kBAAI,EAAE,0BAAQ,CAAmC;YACnE,IAAA,uCAAe,CAA4C;YAClE,IAAM,aAAa,GAAG,OAAwB,CAAC;YAC/C,IAAM,eAAe,GAAG,CAAC,CAAW,EAAE,IAAc,EAAE,QAAkB,CAAC,CAAC;YAC1E,IAAI,WAAW,GAAG,IAAI,C
AAC;YACvB,IAAI,MAAM,IAAI,IAAI,EAAE;gBAClB,WAAW,GAAG,MAAM,CAAC,KAAK,CAAC;gBAC3B,eAAe,CAAC,IAAI,CAAC,MAAgB,CAAC,CAAC;aACxC;YACD,IAAI,UAAU,GAAG,IAAI,CAAC;YACtB,IAAI,KAAK,IAAI,IAAI,EAAE;gBACjB,UAAU,GAAG,KAAK,CAAC,KAAK,CAAC;gBACzB,eAAe,CAAC,IAAI,CAAC,KAAe,CAAC,CAAC;aACvC;YACD,IAAM,OAAO,GAAG,IAAI,gBAAgB,CAChC,CAAC,CAAC,KAAK,EAAE,IAAI,CAAC,KAAK,EAAE,QAAQ,CAAC,KAAK,EAAE,WAAW,EAAE,UAAU,CAAC,CAAC;YAClE,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,eAAe,CAAC,EAAC,CAAC,CAAC;YACjE,OAAO,aAAa,CAAC,gBAAgB,CACjC,OAAO,EAAE,eAAe,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;SACrD;KACF;;IC/CD;;;;;;;;;;;;;;;;aAuBgB,WAAW,CAAC,IAI3B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,EAAE,sBAAM,EAAE,kBAAI,EAAE,sDAAsB,CAAW;QAEvD,IAAA,uBAAO,EACP,eAAG,EACH,6BAAU,EACV,2BAAS,EACT,uCAAe,EACf,6BAAU,EACV,qCAAc,CACN;QAEV,IAAM,WAAW,GAAG3D,eAAY,CAAC,uBAAuB,CAAC,UAAU,CAAC,CAAC;QACrE,IAAM,QAAQ,GAAGA,eAAY,CAAC,iBAAiB,CAC3C,CAAC,CAAC,KAAyC,EAC3C,MAAM,CAAC,KAAyC,EAAE,OAAO,EAAE,SAAS,EAAE,GAAG,EACzE,eAAe,EAAE,KAAK,kBAAkB,WAAW,CAAC,CAAC;QAEzD,OAAO,UAAU,CAAC,EAAC,CAAC,GAAA,EAAE,MAAM,QAAA,EAAE,QAAQ,UAAA,EAAE,OAAO,SAAA,EAAE,IAAI,MAAA,EAAE,sBAAsB,wBAAA;YACzE,cAAc,gBAAA,EAAE,UAAU,YAAA,EAAC,CAAC,CAAC;IACnC,CAAC;IAEM,IAAM,iBAAiB,GAAiB;QAC7C,UAAU,EAAE4D,cAAW;QACvB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,WAA+B;KAC5C;;ICtDD;;;;;;;;;;;;;;;;aAuBgB,oBAAoB,CAAC,IAIpC;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,EAAE,sBAAM,EAAE,kBAAI,EAAE,sDAAsB,CAAW;QAClD,IAAA,uBAAO,EAAE,eAAG,EAAE,2BAAS,EAAE,uCAAe,EAAE,6BAAU,EAAE,qCAAc,CACjE;QAEV,IAAI,UAAU,GAAG,SAAS,CAAC;QAC3B,IAAI,UAAU,IAAI,IAAI,EAAE;YACtB,UAAU,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;SACrB;QAED7D,OAAI,CAAC,MAAM,CACPC,eAAY,CAAC,8BAA8B,CAAC,OAAO,EAAE,UAAU,CAAC,EAChE,cAAM,OAAA,gEAAgE;aAClE,oBAAkB,OAAO,wBAAmB,UAAU,MAAG,CAAA,GAAA,CAAC,CAAC;QAEnE,IAAM,QAAQ,GAAGA,eAAY,CAAC,iBAAiB,CAC3C,CAAC,CAAC,KAAyC,EAC3C,MAAM,CAAC,KAAyC,EAAE,OAAO,EAAE,UAAU,EACrE,GAAG,EAAE,eAAe,EAAE,IAAI,iBAAiB,CAAC;QAEhD,IAAM,aAAa,GAAiB,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC;QAEhD,IAAM,OAAO,GAAG,IAAI,IAAI,IAAI,CAAC;QAC7B,IAAM,yBAAyB,GAAG,sBAAsB,IAAI,IAAI,CAAC;QAEjE,IAAI,OAAO,EAAE;YACX,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SAC1B;QACD,IAAI,yBAAyB,EAAE;YAC7B,aAAa,CAAC,IAAI,CAAC,sBAAsB,CAAC,CAAC;SAC5C;QAED,IAAM,UAAU,GAAG;YACjB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,EAAE,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,EAAC;YACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC;YACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,cAAc,EAAE,QAAQ,CAAC,aAAa,CAAC,EAAC;YACxE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,QAAQ,EAAE,QAAQ,CAAC,OAAO,CAAC,EAAC;SAC7D,CAAC;QAEF,IAAI,OAAyD,CAAC;;;QAG9D,IAAI,QAAQ,CAAC,SAAS,KAAK,CAAC,IAAI,QAAQ,CAAC,QAAQ,KAAK,QAAQ,CAAC,SAAS;YACpE,QAAQ,CAAC,OAAO,KAAK,QAAQ,CAAC,QAAQ,IAAI,QAAQ,CAAC,YAAY,KAAK,CAAC;YACrE,QAAQ,CAAC,WAAW,KAAK,CAAC;YAC1B,QAAQ,CAAC,YAAY,KAAK,QAAQ,CAAC,WAAW;YAC9C,QAAQ,CAAC,UAAU,KAAK,QAAQ,CAAC,WAAW;YAC5C,QAAQ,CAAC,cAAc,KAAK,CAAC,IAAI,QAAQ,CAAC,aAAa,KAAK,CAAC;YAC7D,QAAQ,CAAC,YAAY,KAAK,CAAC,IAAI,QAAQ,CAAC,UAAU,GAAG,CAAC,KAAK,CAAC,EAAE;YAChE,OAAO,GAAG,IAAI,yBAAyB,CACnC,QAAQ,EAAE,OAAO,EAAE,UAAU,EAAE,yBAAyB,CAAC,CAAC;SAC/D;aAAM;YACL,OAAO,GAAG,IAAI,sBAAsB,CAChC,QAAQ,EAAE,OAAO,EAAE,UAAU,EAAE,yBAAyB,CAAC,CAAC;YAC9D,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,CAAC,EAAC,EAC9C,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,WAAW,CAAC,EAAC,EAC7C,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,WAAW,GAAG,QAAQ,CAAC,UAAU,CAAC,EAAC,CAAC,CAAC;SAC1E;QACD,IAAI,UAAU,KAAK,WAAW,EAAE;YAC9B,UAAU,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,
IAAI,EAAE,CAAC,cAAc,CAAC,EAAC,CAAC,CAAC;YAC3D,OAAO,CAAC,QAAQ,IAAI,eAAe,CAAC;SACrC;QACD,IAAM,MAAM,GACR,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,aAAa,EAAE,SAAS,EAAE,UAAU,CAAC,CAAC;QAE5E,OAAO,MAAM,CAAC;IAChB,CAAC;IAEM,IAAM,0BAA0B,GAAiB;QACtD,UAAU,EAAE6D,uBAAoB;QAChC,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,oBAAwC;KACrD;;ICrGD;;;;;;;;;;;;;;;;IAqBA;QAUE,yBAAY,QAAgB,EAAE,KAAe;YAL7C,kBAAa,GAAa,CAAC,GAAG,EAAE,SAAS,CAAC,CAAC;YAE3C,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,cAAY,QAAU,CAAC;YACxC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACzB,IAAI,CAAC,QAAQ,GAAG,+BAA6B,iBAAiB,CAAC,QAAQ,CAAC,MAAG,CAAC;SAC7E;QAED,qCAAW,GAAX;YACE,IAAI,YAAY,CAAC;YACjB,IAAI,IAAI,CAAC,QAAQ,GAAG,CAAC,EAAE;gBACrB,YAAY,GAAG,qBAAqB,CAAC;aACtC;iBAAM;gBACL,YAAY,GAAG,kBAAkB,CAAC;aACnC;YACD,IAAM,QAAQ,GAAG,eACX,iCAAiC,EAAE,kSAMf,YAAY,sLAOnC,CAAC;YACJ,OAAO,QAAQ,CAAC;SACjB;8BACF;KAAA;;aCxCe,QAAQ,CACpB,IAAsD;QACjD,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,sBAAM,EAAE,wBAAO,CAAW;QAEjC,IAAM,YAAY,GAAG,OAAO,CAAC,KAAK,CAAC;QACnC,IAAM,SAAS,GAAG,YAAY,CAAC,YAAY,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;QACxD,IAAM,UAAU,GAAG9D,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QAE9C,IAAA,mEAC8C,EAD7C,mBAAW,EAAE,iBAAS,EAAE,iBAAS,EAAE,eACU,CAAC;QAErD,IAAM,cAAc,GAAG,OAAO,CAC1B,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,SAAS,EAAE,SAAS,CAAC,EAAC,EAAC,CAAC,CAAC;QAC7E,IAAM,QAAQ,GAAG,OAAO,CAAC;YACvB,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC;YACnB,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,KAAK,EAAE,EAAEA,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,KAAK,CAAC,GAAG,SAAS,GAAG,SAAS,CAAC,EAAC;SAC5E,CAAC,CAAC;QACH,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;YAC7C,MAAM,CAAC,KAAK,KAAK,QAAQ,EAAE;YAC7B,IAAM,WAAW,GAAG,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,MAAM,CAAe,CAAC;YACnE,IAAM,SAAS,GAAG,OAAO,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;YAC7C,IAAM,QAAQ,GAAG,eAAe,CAC5B,WAAW,EAAE,SAAS,EAAE,MAAM,CAAC,KAAK,EAAE,SAAS,EAAE,SAAS,EAAE,SAAS,EACrE,OAAO,EAAE,MAAM,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;YAEvC,OAAO,OAAO,CAAC,cAAc,CAAC,WAAW,EAAE,MAAM,CAAC,KAAK,EAAE,QAAQ,CAAC,MAAM,CAAC,CAAC;SAC3E;QACD,IAAM,OAAO,GAAG,IAAI,eAAe,CAAC,SAAS,EAAE,CAAC,SAAS,EAAE,SAAS,CAAC,CAAC,CAAC;QACvE,IAAM,WAAW,GACb,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,OAAO,EAAC,CAAC,CAAC;QACzE,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAChC,OAAO,EAAE,CAAC,QAAQ,EAAE,cAAc,CAAC,EAAE,QAAQ,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;QAEtE,IAAM,QAAQ,GACV,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,WAAW,EAAC,EAAC,CAAC,CAAC;QAEtE,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;QAC3C,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;QACrC,OAAO,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;QAEhC,OAAO,QAAQ,CAAC;IAClB,CAAC;IAEM,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAE+D,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,QAA4B;KACzC;;IC1ED;;;;;;;;;;;;;;;;IAsBA;QAUE,uBAAY,MAAgB,EAAE,WAAqB;YALnD,kBAAa,GAAa,CAAC,GAAG,EAAE,SAAS,CAAC,CAAC;YAC3C,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,MAAM,CAAC,KAAK,EAAE,CAAC;YAClC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,QAAQ,CAAC;SAC3B;QAED,mCAAW,GAAX;YACE,IAAM,YAAY,GAA
GC,iBAAe,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAClD,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,uSAKS,YAAY,kCAG3D,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA,IAAA;IAED;IACA,SAASA,iBAAe,CAAC,MAAgB;QACvC,IAAM,aAAa,GAAG,CAAC,SAAS,EAAE,SAAS,EAAE,SAAS,EAAE,SAAS,CAAC,CAAC;QACnE,IAAM,YAAY,GAAG,EAAE,CAAC;QACxB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACtC,IAAI,CAAC,KAAK,CAAC,EAAE;gBACX,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;aAC7B;iBAAM;gBACL,YAAY,CAAC,IAAI,CAAC,KAAG,aAAa,CAAC,CAAC,CAAG,CAAC,CAAC;aAC1C;SACF;QACD,OAAO,YAAY,CAAC,IAAI,EAAE,CAAC;IAC7B;;ICtEA;;;;;;;;;;;;;;;;aAyBgB,QAAQ,CACpB,IAC0E;QAErE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,EAAE,wBAAO,CAAW;QACrB,IAAA,iBAAI,EAAE,2BAAS,CAAU;;;QAIhC,IAAM,UAAU,GAAGhE,OAAI,CAAC,cAAc,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;QAEzD,IAAM,SAAS,GAAGC,eAAY,CAAC,YAAY,CAAC,wBAAwB,CAChE,CAAC,EAAE,OAAO,EAAE,UAAU,EAAE,SAAS,CAAC,CAAC;QAEvC,IAAM,WAAW,GAAGD,OAAI,CAAC,aAAa,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;QAEtD,IAAM,SAAS,GAAG,EAAE,CAAC;QAErB,IAAM,QAAQ,GAAG,OAAO,CAAC;YACvB,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC;YACX,OAAO,SAAA;YACP,KAAK,EAAE;gBACL,KAAK,EAAE;oBACL,SAAS,CAAC,SAAS,EAAE,SAAS,CAAC,SAAS,EAAE,SAAS,CAAC,OAAO;oBAC3D,SAAS,CAAC,SAAS;iBACpB;aACF;SACF,CAAC,CAAC;QAEH,IAAM,YAAY,GAAG,OAAO,CAAC;YAC3B,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC;YACpB,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,SAAS,CAAC,SAAS,EAAE,WAAW,GAAG,SAAS,CAAC,SAAS,CAAC,EAAC;SACzE,CAAC,CAAC;QAEH,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QACzB,SAAS,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAE7B,IAAM,kBAAkB,GAAG;YACzB,SAAS,CAAC,SAAS,EAAE,SAAS,CAAC,SAAS,EAAE,WAAW,GAAG,SAAS,CAAC,SAAS;YAC3E,SAAS,CAAC,SAAS;SACpB,CAAC;QAEF,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,EAAE,OAAO,CAAC,CAAC,EAAE;YAC5C,IAAM,iBAAiB,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;YACrE,IAAM,aAAa,GAAG,iBAAiB,CAAC,MAAoB,CAAC;YAC7D,IAAM,UAAU,GACZU,SAAM,CAAC,YAAY,CAAC,KAAK,EAAE,YAAY,CAAC,KAAK,EAAE,aAAa,CAC1C,CAAC;YACvB,IAAM,WAAW,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YAC3D,IAAM,OAAO,GAAG,WAAW,CAAC,MAAoB,CAAC;YACjD,IAAM,IAAI,GACNA,SAAM,CAAC,QAAQ,CAAC,KAAK,EAAE,QAAQ,CAAC,KAAK,EAAE,OAAO,CAAuB,CAAC;YAC1E,IAAM,MAAM,GAAG,eAAe,CAAC,IAAI,EAAE,UAAU,EAAE,kBAAkB,CAAC,CAAC;YAErE,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;YAEtD,OAAO,OAAO,CAAC,cAAc,CACzB,SAAS,CAAC,WAAW,EAAE,MAAM,CAAC,KAAK,EAAE,MAAM,CAAC,MAAoB,CAAC,CAAC;SACvE;QAED,IAAM,OAAO,GAAG,IAAI,aAAa,CAAC,QAAQ,CAAC,KAAK,EAAE,kBAAkB,CAAC,CAAC;QACtE,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAChC,OAAO,EAAE,CAAC,QAAQ,EAAE,YAAY,CAAC,EAAE,QAAQ,CAAC,KAAK,CAAC,CAAC;QACvD,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;QAEpB,IAAM,QAAQ,GAAG,OAAO,CACpB,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,SAAS,CAAC,WAAW,EAAC,EAAC,CAAC,CAAC;QACxE,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QACtD,OAAO,QAAQ,CAAC;IAClB,CAAC;IAEM,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAEuD,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,QAA4B;KACzC;;ICtGD;;;;;;;;;;;;;;;;IAwBO,IAAM,OAAO,GAAG,gBAAgB,CAAC;QACtC,SAAS,EAAE,YAAY,CAAC,OAAO;QAC/B,aAAa,EAAEC,cAAU;QACzB,KAAK,EAAE,MAAM;KACd,CAAC,CAAC;IAEI,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAAO;KACpB;;IClCD;;;;;;;;;;;;;;;;IAsBO,IAAM,YAAY,GAAG,gBAAgB,CAAC;QAC3C,SAAS,EAAE,YAAY,CAAC,aAAa;QACrC,KAAK,EAAE,MAAM;QACb,aAAa,EAAEC,mBAAe;KAC/B,CAAC,CAAC;IAEI,IAAM,kBAAkB,GAAiB;QAC9C,UAAU,EAAEC,eAAY;QACxB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,YAAY;KACzB;;IChCD;;;;;;;;;;;;;;;;aAuBgB,SAAS,CAAC,IAIzB;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,Y
AAC,CAAW;QACZ,IAAA,mBAAK,CAAU;QACtB,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,KAAK,CAAC,EAAC,CAAC,CAAC;QACvD,IAAM,OAAO,GAAG,IAAI,cAAc,CAAC,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,SAAS,CAAC,CAAC;QACnE,OAAO,CAAC,QAAQ,GAAG,cAAc,CAAC;QAClC,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,WAAW,CAAC,CAAC;IACxE,CAAC;IAEM,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEC,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,SAA6B;KAC1C;;ICzCD;;;;;;;;;;;;;;;;IAsBO,IAAM,IAAI,GAAG,gBAAgB,CAChC,EAAC,SAAS,EAAE,YAAY,CAAC,IAAI,EAAE,KAAK,EAAE,MAAM,EAAE,aAAa,EAAEC,WAAO,EAAC,CAAC,CAAC;IAEpE,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;IC7BD;;;;;;;;;;;;;;;;IAsBO,IAAM,SAAS,GAAG,gBAAgB,CAAC;QACxC,SAAS,EAAE,YAAY,CAAC,UAAU;QAClC,KAAK,EAAE,MAAM;QACb,aAAa,EAAEC,gBAAY;KAC5B,CAAC,CAAC;IAEI,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEC,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,SAAS;KACtB;;IChCD;;;;;;;;;;;;;;;;IAsBO,IAAM,GAAG,GACZ,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,GAAG,EAAE,aAAa,EAAE,UAAU,EAAC,CAAC,CAAC;IAEnE,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IC7BD;;;;;;;;;;;;;;;;IAuBO,IAAM,UAAU,GAAG,gBAAgB,CAAC;QACzC,SAAS,EAAE,YAAY,CAAC,WAAW;QACnC,KAAK,EAAE,MAAM;KACd,CAAC,CAAC;IAEI,IAAM,gBAAgB,GAAiB;QAC5C,UAAU,EAAEC,aAAU;QACtB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAU;KACvB;;IChCD;;;;;;;;;;;;;;;;IAuBO,IAAM,UAAU,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,WAAW,EAAC,CAAC,CAAC;IAEtE,IAAM,gBAAgB,GAAiB;QAC5C,UAAU,EAAEC,aAAU;QACtB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAU;KACvB;;IC7BD;;;;;;;;;;;;;;;;aAsBgB,GAAG,CACf,IAAkE;QAE7D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,yCAAgB,EAAE,yBAAQ,CAAU;QAE3C,OAAO,MAAM,CAAC,CAAC,EAAE,gBAAgB,EAAE,QAAQ,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;IAC/D,CAAC;IAEM,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAuB;KACpC;;ICpCD;;;;;;;;;;;;;;;;IAwBO,IAAM,OAAO,GAAG,gBAAgB,CAAC;QACtC,SAAS,EAAE,YAAY,CAAC,GAAG;QAC3B,aAAa,EAAEC,cAAU;KAC1B,CAAC,CAAC;IAEI,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAAO;KACpB;;ICjCD;;;;;;;;;;;;;;;;aAuBgB,OAAO,CACnB,IAA0E;QAErE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,6BAAU,EAAE,uBAAO,EAAE,eAAG,EAAE,uCAAe,CAAU;QAC1D,IAAM,SAAS,GAAG,CAAC,CAAC;QACpB,IAAM,QAAQ,GAAG/E,eAAY,CAAC,iBAAiB,CAC3C,CAAC,CAAC,KAAyC,EAAE,UAAU,EAAE,OAAO,EAChE,SAAS,EAAE,GAAG,EAAE,eAAe,CAAC,CAAC;QACrC,IAAI,OAAyD,CAAC;QAC9D,IAAM,UAAU,GAAG,EAAE,CAAC;QACtB,IAAI,QAAQ,CAAC,YAAY,KAAK,CAAC,IAAI,QAAQ,CAAC,WAAW,KAAK,CAAC,EAAE;YAC7D,IAAID,OAAI,CAAC,WAAW,CAAC,QAAQ,CAAC,OAAO,EAAE,QAAQ,CAAC,QAAQ,CAAC,EAAE;gBACzD,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;aACzC;YACD,OAAO,GAAG,IAAI,kCAAkC,CAAC,QAAQ,CAAC,CAAC;YAC3D,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC,CAAC,CAAC;SAC3E;aAAM;YACL,OAAO,GAAG,IAAI,aAAa,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;YAC7C,UAAU,CAAC,IAAI,CACX,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,YAAY,EAAE,QAAQ,CAAC,WAAW,CAAC,EAAC,EACpE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,EAAE,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,EAAC,EAAE;gBACpE,IAAI,EAAE,OAAO;gBACb,IAAI,EAAE,CAAC,QAAQ,CAAC,cAAc,EAAE,QAAQ,CAAC,aAAa,CAAC;aACxD,EACD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,QAAQ,CAAC,QAAQ,EAAE,QAAQ,CAAC,OAAO,CAAC,EAAC,EAAE;gBAC5D,IAAI,EAAE,OAAO;gBACb,IAAI,EAAE,CAAC,QAAQ,CAAC,qBAAqB,EAAE,QAAQ,CAAC,oBAAoB,CAAC;aACtE,CAAC,CAAC;SACR;QAED,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,UAAU,CAAC,CAAC;IACrE,CAAC;IAEM,IAAM,aAAa,GAAiB;QACzC,UA
AU,EAAEiF,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;IC/DD;;;;;;;;;;;;;;;;aAsBgB,IAAI,CAChB,IAAoE;QAE/D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,yBAAQ,EAAE,iBAAI,CAAU;QAE/B,OAAO,MAAM,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC;IACpD,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICpCD;;;;;;;;;;;;;;;;aAsBgB,GAAG,CACf,IAAkE;QAE7D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,EAAE,yBAAQ,CAAU;QAE/B,OAAO,MAAM,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;IACnD,CAAC;IAEM,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAuB;KACpC;;ICpCD;;;;;;;;;;;;;;;;IAwBO,IAAM,OAAO,GAAG,gBAAgB,CAAC;QACtC,SAAS,EAAE,YAAY,CAAC,GAAG;QAC3B,aAAa,EAAEC,cAAU;KAC1B,CAAC,CAAC;IAEI,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAAO;KACpB;;ICjCD;;;;;;;;;;;;;;;;IAqBA;QAYE,0BACI,MAAgB,EAAE,QAAiC,EACnD,IAA2B;YAF/B,iBAeC;YAxBD,aAAQ,GAAG,EAAE,CAAC;YAGd,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAGrD,SAAI,GAAG,IAAI,CAAC;YAKV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,GAAG,CAC3B,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,CAAC,CAAC,CAAC,CAAC,mBAAmB,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAA,gBAAgB,CAAC;YACtE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,QAAQ,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC;gBAChB,KAAI,CAAC,QAAQ,IAAI,SAAO,CAAC,kBAAe,CAAC;aAC1C,CAAC,CAAC;YACH,IAAI,CAAC,MAAM,GAAG,IAAI,KAAK,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC;YACzC,IAAI,CAAC,SAAS,GAAG,eAAa,IAAM,CAAC;SACtC;QAED,sCAAW,GAAX;YACE,IAAM,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC;;YAEhC,IAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,iBAAe,CAAC,QAAK,GAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;YACzE,IAAM,GAAG,GAAG,IAAI,CAAC,MAAM;iBACN,GAAG,CACA,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,iBAAe,CAAC,8BACtB,IAAI,GAAG,CAAC,GAAG,MAAI,CAAC,MAAG,GAAG,EAAE,CAAE,GAAA,CAAC;iBAClC,IAAI,CAAC,GAAG,CAAC,CAAC;YAE3B,IAAM,WAAW,GAAG,IAAI,KAAK,CAAC,GAAG,OAAO,GAAG,UAAU,CAAC;YACtD,IAAM,SAAS,GAAG,IAAI,KAAK,CAAC,GAAG,KAAK,GAAG,QAAQ,CAAC;YAChD,IAAM,UAAU,GAAG,IAAI,KAAK,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;YACnD,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,CAAC;YACtC,IAAM,cAAc,GAAG,IAAI,GAAG,CAAC;gBAC3B,CAAC,WAAW,EAAE,WAAW,EAAE,WAAW,EAAE,WAAW,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,IAAI,CAAC;gBACnE,QAAQ,CAAC;YAEb,OAAO,aACH,iCAAiC,EAAE,sEAEnB,KAAK,SAAI,KAAK,gCAChB,KAAK,SAAI,GAAG,2FAEF,IAAI,wCAClB,UAAU,WAAM,WAAW,2BAC7B,UAAU,WAAM,WAAW,eAAU,UAAU,WACvD,IAAI,CAAC,MAAM,iCACK,UAAU,YAAO,SAAS,2BAClC,UAAU,YAAO,SAAS,oBAAe,UAAU,WAC3D,IAAI,CAAC,MAAM,oHAIsB,cAAc,kCAGlD,CAAC;SACH;+BACF;KAAA;;ICzFD;;;;;;;;;;;;;;;;IAuBO,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEC,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAC,EAAwB;gBAAvB,kBAAM,EAAE,gBAAK,EAAE,oBAAO;YAC3B,IAAA,YAAC,CAA8B;YAC/B,IAAA,yBAAQ,EAAE,iBAAI,CAAuC;YAC5D,IAAM,aAAa,GAAG,OAAwB,CAAC;YAE/C,IAAM,WAAW,GAAG,QAAQ,CAAC,GAAG,CAAC,UAAA,CAAC;gBAChC,OAAO,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAC,CAAC;aAC5C,CAAC,CAAC;YACH,IAAM,OAAO,GAAG,IAAI,gBAAgB,CAAC,CAAC,CAAC,KAAK,EAAE,QAAQ,EAAE,IAAI,CAAC,CAAC;YAC9D,IAAM,MAAM,GACR,aAAa,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;YAEvE,OAAO,MAAM,CAAC;SACf;KACF;;ICfD;IACA;aACgB,GAAG,CAAC,IAAiD;QAE5D,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,YAAC,CAAW;QAEnB,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE
;YACnC,IAAM,KAAK,GAAG,OAAO,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YACxC,IAAA,0DACsD,EADrD,iBAAS,EAAE,gBAC0C,CAAC;YAC7D,OAAO,OAAO,CAAC,cAAc,CAAC,QAAQ,EAAE,CAAC,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;SAC7D;QAED,IAAM,OAAO,GAAG,IAAI,cAAc,CAAC,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,GAAG,CAAC,CAAC;QAE7D,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;IACzD,CAAC;IAEM,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAuB;KACpC;;IChDD;;;;;;;;;;;;;;;;aAoBgB,mBAAmB,CAAC,IAInC;QACC,OAAO,CAAC,IAAI,CACR,wDAAwD;YACxD,0CAA0C,CAAC,CAAC;QAEzC,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,oBAAK,EAAE,sBAAM,CAAW;QACxB,IAAA,mCAAa,EAAE,iCAAY,EAAE,qCAAc,CAAU;QAE5D,IAAM,SAAS,GAAG,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,MAAM,CAAe,CAAC;QAC/D,IAAM,UAAU,GAAG,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,MAAM,CAAe,CAAC;QAE1D,IAAA,6IAAe,CACkD;QAExE,OAAO,OAAO,CAAC,cAAc,CACzB,CAAC,eAAe,CAAC,MAAM,CAAC,EAAE,OAAO,EAAE,IAAI,UAAU,CAAC,eAAe,CAAC,CAAC,CAAC;IAC1E,CAAC;IAEM,IAAM,yBAAyB,GAAiB;QACrD,UAAU,EAAEC,sBAAmB;QAC/B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,mBAAuC;KACpD;;IC/CD;;;;;;;;;;;;;;;;aAqBgB,mBAAmB,CAAC,IAInC;QACC,OAAO,CAAC,IAAI,CACR,wDAAwD;YACxD,0CAA0C,CAAC,CAAC;QAEzC,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,oBAAK,EAAE,sBAAM,CAAW;QACxB,IAAA,mCAAa,EAAE,iCAAY,EAAE,qCAAc,EAAE,iCAAY,CAAU;QAE1E,IAAM,SAAS,GAAG,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAC,MAAM,CAAe,CAAC;QAC/D,IAAM,UAAU,GAAG,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAC,MAAM,CAAe,CAAC;QAEjE,IAAM,gBAAgB,GAAG,aAAa,CAAC;QACvC,IAAM,eAAe,GAAG,YAAY,CAAC;QACrC,IAAM,iBAAiB,GAAG,cAAc,CAAC;QACzC,IAAM,eAAe,GAAG,YAAY,CAAC;QAE/B,IAAA,0IAGqC,EAHpC,oCAAe,EAAE,kCAGmB,CAAC;QAE5C,OAAO;YACL,OAAO,CAAC,cAAc,CAClB,CAAC,eAAe,CAAC,MAAM,CAAC,EAAE,OAAO,EAAE,IAAI,UAAU,CAAC,eAAe,CAAC,CAAC;YACvE,OAAO,CAAC,cAAc,CAClB,CAAC,cAAc,CAAC,MAAM,CAAC,EAAE,SAAS,EAAE,IAAI,YAAY,CAAC,cAAc,CAAC,CAAC;SAC1E,CAAC;IACJ,CAAC;IAEM,IAAM,yBAAyB,GAAiB;QACrD,UAAU,EAAEC,sBAAmB;QAC/B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,mBAAuC;KACpD;;IC3DD;;;;;;;;;;;;;;;;aA0BgB,SAAS,CACrB,IAAuD;QAClD,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,YAAC,CAAW;QACnB,IAAI,CAAC,CAAC,KAAK,KAAK,WAAW,EAAE;YAC3B,IAAM,QAAQ,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACrD,IAAM,CAAC,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACtD,IAAM,QAAQ,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACrD,IAAM,CAAC,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAEtD,IAAM,MAAM,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,IAAI,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAE9D,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YACrC,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YAC9B,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YACrC,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YAE9B,OAAO,MAAM,CAAC;SACf;aAAM;YACL,OAAO,IAAI,CAAC;gBACV,KAAK,EAAE;oBACL,KAAK,EAAE,CAAC,CAAC,KAAK;oBACd,KAAK,EAAE,CAAC,CAAC,KAAK;oBACd,KAAK,EAAE,CAAC,CAAC,KAAK,KAAK,QAAQ,GAAG,EAAE,GAAG,CAAC;iBACrC;gBACD,OAAO,SAAA;aACR,CAAC,CAAC;SACJ;IACH,CAAC;IAEM,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEC,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,SAA6B;KAC1C;;IC5DD;;;;;;;;;;;;;;;;aA2BgB,QAAQ,CACpB,IAAsD;QACjD,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,YAAC,CAAW;QAEnB,IAAI,CAAC,CAAC,KAAK,KAAK,QAAQ,EAAE;YACxB,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;SACjE;aAAM,IAAI,CAAC,CAAC,KAAK,KAAK,WAAW,EAAE;YAClC,IAAM,QAAQ,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACrD,IAAM,CAAC,GAAG,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,E
AAC,CAAC,CAAC;YACrD,IAAM,QAAQ,GAAG,IAAI,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YACrD,IAAM,CAAC,GAAG,SAAS,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAEtD,IAAM,MAAM,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,IAAI,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;YAE9D,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YACrC,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YAC9B,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YACrC,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YAE9B,OAAO,MAAM,CAAC;SACf;aAAM;YACL,OAAO,IAAI,CAAC,EAAC,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;SAC3E;IACH,CAAC;IAEM,IAAM,cAAc,GAAiB;QAC1C,UAAU,EAAEC,WAAQ;QACpB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,QAA4B;KACzC;;ICzDD;;;;;;;;;;;;;;;;aAuBgB,IAAI,CAChB,IAAoE;QAE/D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,iBAAI,CAAU;QAErB,IAAI,MAAM,CAAC,MAAM,KAAK,CAAC,EAAE;YACvB,OAAO,UAAU,CACb,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,GAAG,EAAE,IAAI,EAAC,EAAC,CAAC,CAAC;SAChE;QAED,IAAM,KAAK,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;QAC9B,IAAM,KAAK,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;QAE9B,MAAM,CAAC,OAAO,CAAC,UAAA,CAAC;YACd3F,OAAI,CAAC,iBAAiB,CAClB,KAAK,EAAE,CAAC,CAAC,KAAK,EACd,uDAAuD,CAAC,CAAC;YAC7DA,OAAI,CAAC,MAAM,CACP,KAAK,KAAK,CAAC,CAAC,KAAK,EACjB,cAAM,OAAA,uDAAuD,GAAA,CAAC,CAAC;SACpE,CAAC,CAAC;QAEH,IAAM,uBAAuB,GAAiB,EAAE,CAAC;QACjD,IAAM,eAAe,GAAG,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC;YAClC,IAAM,SAAS,GACX,UAAU,CAAC,EAAC,MAAM,EAAE,EAAC,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,GAAG,EAAE,IAAI,EAAC,EAAC,CAAC,CAAC;YAClE,uBAAuB,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;YACxC,OAAO,SAAS,CAAC;SAClB,CAAC,CAAC;QAEH,IAAM,MAAM,GAAG,MAAM,CAAC,EAAC,MAAM,EAAE,eAAe,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,MAAA,EAAC,EAAC,CAAC,CAAC;QAEzE,uBAAuB,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QAEpE,OAAO,MAAM,CAAC;IAChB,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAE4F,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICjED;;;;;;;;;;;;;;;;IAqBA;QAWE,oBAAY,MAAgB,EAAE,QAAiC;YAA/D,iBAWC;YAjBD,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,sBAAsB,CAAC;YAClC,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,GAAG,CAC3B,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,CAAC,CAAC,CAAC,CAAC,mBAAmB,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAA,gBAAgB,CAAC;YACtE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,QAAQ,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC;gBAChB,KAAI,CAAC,QAAQ,IAAI,SAAO,CAAC,kBAAe,CAAC;aAC1C,CAAC,CAAC;YACH,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,IAAI,CAAC,SAAS,GAAG,KAAK,CAAC;SACxB;QAED,gCAAW,GAAX;YACE,IAAM,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC;YAChC,IAAM,IAAI,GAAG,iBAAiB,CAAC,IAAI,CAAC,CAAC;;YAErC,IAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,iBAAe,CAAC,QAAK,GAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;YACzE,IAAM,GAAG,GAAG,IAAI,CAAC,MAAM;iBACN,GAAG,CACA,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,iBAAe,CAAC,8BACtB,IAAI,GAAG,CAAC,GAAG,MAAI,CAAC,MAAG,GAAG,EAAE,CAAE,GAAA,CAAC;iBAClC,IAAI,CAAC,GAAG,CAAC,CAAC;YAC3B,IAAM,UAAU,GAAG,IAAI,GAAG,CAAC,GAAM,IAAI,SAAI,KAAK,MAAG,GAAG,KAAG,KAAO,CAAC;YAC/D,IAAM,QAAQ,GAAG,IAAI,GAAG,CAAC,GAAM,IAAI,SAAI,GAAG,MAAG,GAAG,KAAG,GAAK,CAAC;YAEzD,IAAM,gBAAgB,GAAG,IAAI,GAAG,CAAC,GAAG,mBAAmB,GAAG,cAAc,CAAC;YACzE,IAAM,iBAAiB,GAAG,IAAI,GAAG,CAAC
,GAAG,kBAAkB,GAAG,aAAa,CAAC;YAExE,IAAM,cAAc,GAAG,IAAI,GAAG,CAAC;gBAC3B,CAAC,WAAW,EAAE,WAAW,EAAE,WAAW,EAAE,WAAW,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,IAAI,CAAC;gBACnE,QAAQ,CAAC;YAEb,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,sEAEnB,UAAU,+BACZ,QAAQ,4EAGd,gBAAgB,YAAO,iBAAiB,gLAIb,cAAc,+CAIpD,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;yBACF;KAAA;;ICnFD;;;;;;;;;;;;;;;;IAwBO,IAAM,KAAK,GACd,UAAC,IAEyB;QACjB,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,yBAAQ,EAAE,mCAAa,CAAU;QACxC,IAAI,QAAQ,CAAC,KAAK,CAAC,UAAA,CAAC,IAAI,OAAA5F,OAAI,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,GAAA,CAAC,EAAE;YACpD,OAAO,QAAQ,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;SACzC;QACD,IAAIA,OAAI,CAAC,aAAa,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE;;;YAGrC,IAAM,WAAW,GAAG,QAAQ,CAAC,GAAG,CAC5B,UAAC,CAAC,EAAE,CAAC,IACD,OAAA,CAAC,CAAC,CAAC,CAAC,mBAAmB,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAA,gBAAgB,CAAC;YACjE,OAAO,IAAI,CAAC;gBACV,OAAO,SAAA;gBACP,KAAK,EAAE,EAAC,KAAK,EAAE,WAAW,EAAE,KAAK,EAAE,aAAa,EAAE,KAAK,EAAE,CAAC,CAAC,KAAK,EAAC;aAClE,CAAC,CAAC;SACJ;QACD,IAAM,WAAW,GAAG,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,aAAa,CAAC,EAAC,CAAC,CAAC;QAC/D,QAAQ,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,WAAW,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAC,CAAC,GAAA,CAAC,CAAC;QACzE,IAAM,OAAO,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,KAAK,EAAE,QAAQ,CAAC,CAAC;QAClD,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;IACtE,CAAC,CAAC;IAEC,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAE6F,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAyB;KACtC;;ICvDD;;;;;;;;;;;;;;;;IAsBO,IAAM,GAAG,GAAG,gBAAgB,CAAC;QAClC,SAAS,EAAE,YAAY,CAAC,GAAG;KAC5B,CAAC,CAAC;IAEI,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IC9BD;;;;;;;;;;;;;;;;aAwBgB,KAAK,CAAC,IAAmD;QAEhE,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,YAAC,EAAE,oBAAK,CAAW;QAE1B,IAAM,OAAO,GAAG,IAAI,eAAe,CAAC,YAAY,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,KAAK,CAAC,KAAK,CAAC,CAAC;QAC9E,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,KAAK,CAAC,EAAE,SAAS,CAAC,CAAC;IAClE,CAAC;IAEM,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAyB;KACtC;;ICrCD;;;;;;;;;;;;;;;;aAsBgB,IAAI,CAChB,IAAoE;QAE/D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,EAAE,yBAAQ,CAAU;QAE/B,OAAO,MAAM,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC;IACpD,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICpCD;;;;;;;;;;;;;;;;IAsBO,IAAM,KAAK,GACd,UAAC,IAAiD;QACzC,IAAA,sBAAO,EAAE,kBAAK,CAAS;QACvB,IAAA,mBAAK,EAAE,iBAAI,EAAE,iBAAI,EAAE,mBAAK,CAAU;QACzC,IAAM,MAAM,GAAG,YAAY,CAAC,KAAK,EAAE,IAAI,EAAE,IAAI,EAAE,KAAK,CAAC,CAAC;QACtD,OAAO,OAAO,CAAC,cAAc,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC;IAChE,CAAC,CAAC;IAEC,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAyB;KACtC;;IClCD;;;;;;;;;;;;;;;;IAuBO,IAAM,OAAO,GAAG,gBAAgB,CAAC,EAAC,SAAS,EAAE,YAAY,CAAC,GAAG,EAAC,CAAC,CAAC;IAEhE,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;IC7BD;;;;;;;;;;;;;;;;IAqBO,IAAM,IAAI,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,IAAI,EAAC,CAAC,CAAC;IAEzD,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;IC3BD;;;;;;;;;;;;;;;;IAqBO,IAAM,KAAK,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,KAAK,EAAC,CAAC,CAAC;IAE3D,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAK;KAClB;;IC3BD;;;;;;;;;;;;;;;;IAqBA;QAUE,
+BACI,UAA4C,EAAE,SAAiB,EAC/D,QAAgB;YAPpB,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,wDAAwD,CAAC;YACpE,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAKV,IAAI,CAAC,WAAW,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,QAAQ,EAAE,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;YAEvE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAE3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,SAAS,GAAG,gBAAgB,CAAC;SACnC;QAED,2CAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,2uDA0CtC,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;oCACF;KAAA;;aCrEe,cAAc,CAAC,IAI9B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,sBAAM,CAAW;QACjB,IAAA,iCAAY,EAAE,iBAAI,EAAE,yCAAgB,CAAU;QAE/C,IAAA,oBAA4B,EAA3B,iBAAS,EAAE,gBAAgB,CAAC;QACnC,IAAM,YAAY,GAAG,YAAY,IAAI,SAAS,GAAG,CAAC,GAAG,GAAG,GAAG,GAAG,CAAC;QAC/D,IAAM,WAAW,GAAG,YAAY,IAAI,QAAQ,GAAG,CAAC,GAAG,GAAG,GAAG,GAAG,CAAC;QAC7D,IAAM,qBAAqB,GAAG,gBAAgB,GAAG,GAAG,GAAG,GAAG,CAAC;QAC3D,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,YAAY,EAAE,WAAW,CAAC,EAAC;YACpD,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,qBAAqB,CAAC,EAAC;SACjD,CAAC;QAEF,IAAM,OAAO,GAAG,IAAI,qBAAqB,CACrC,MAAM,CAAC,KAAyC,EAAE,SAAS,EAAE,QAAQ,CAAC,CAAC;QAE3E,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,EAAE,SAAS,EAAE,WAAW,CAAC,CAAC;IAC7E,CAAC;IAEM,IAAM,oBAAoB,GAAiB;QAChD,UAAU,EAAEC,iBAAc;QAC1B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,cAAkC;KAC/C;;IClDD;;;;;;;;;;;;;;;;IAqBA;QAWE,sCACI,UAA4C,EAAE,SAAiB,EAC/D,QAAgB,EAAE,gBAAyB;YAR/C,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YACtB,aAAQ,GAAG,iDAAiD,CAAC;YAC7D,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAKV,IAAI,CAAC,WAAW,GAAG,CAAC,UAAU,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,QAAQ,EAAE,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;YAEvE,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAE3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,gBAAgB,GAAG,gBAAgB,CAAC;YACzC,IAAI,CAAC,SAAS,GAAG,mBAAiB,gBAAkB,CAAC;SACtD;QAED,kDAAW,GAAX;YACE,IAAI,iBAAyB,CAAC;YAC9B,IAAI,IAAI,CAAC,gBAAgB,EAAE;gBACzB,iBAAiB;oBACb,wEAAwE;wBACxE,mBAAmB,CAAC;aACzB;iBAAM;gBACL,iBAAiB,GAAG,iDAAiD,CAAC;aACvE;YAED,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,quBAmBP,iBAAiB,2bAWhD,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;2CACF;KAAA;;aCpEe,qBAAqB,CAAC,IAIrC;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,sBAAM,CAAW;QACjB,IAAA,iCAAY,EAAE,yCAAgB,EAAE,iBAAI,CAAU;QAE/C,IAAA,oBAA4B,EAA3B,iBAAS,EAAE,gBAAgB,CAAC;QACnC,IAAM,YAAY,GAAG,YAAY,IAAI,SAAS,GAAG,CAAC,GAAG,GAAG,GAAG,GAAG,CAAC;QAC/D,IAAM,WAAW,GAAG,YAAY,IAAI,QAAQ,GAAG,CAAC,GAAG,GAAG,GAAG,GAAG,CAAC;;QAE7D,IAAM,SAAS,GAAG,YAAY,GAAG,GAAG,GAAG,GAAG,CAAC;QAC3C,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,YAAY,EAAE,WAAW,CAAC,EAAC;YACpD,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;SACrC,CAAC;QAEF,IAAM,OAAO,GAAG,IAAI,4BAA4B,CAC5C,MAAM,CAAC,KAAyC,EAAE,SAAS,EAAE,QAAQ,EACrE,gBAAgB,CAAC,CAAC;QACtB,OAAO,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;IAChF,CAAC;IAEM,IAAM,2BAA2B,GAAiB;QACvD,UAAU,EAAEC,wBAAqB;QACjC,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,qBAAyC;KACtD;;ICnDD;;;;;;;;;;;;;;;;IAqBA;QAWE,uBACI,UAA4C,EAC5C,SAA0C;YAZ9C,gBAAW,GAAa,EAAE,CAAC;YAI3B,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YAEtB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAErD,SAAI,GAAG,IAAI,CAAC;YAKV,IAAI,CAAC,WAAW,GAAG,UAAU,CAAC;YAC9B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,QAAQ,GAAG,8E
ACQ,CAAC;YACzB,IAAI,CAAC,SAAS,GAAG,QAAQ,CAAC;YAC1B,IAAI,CAAC,WAAW,GAAG,UAAU,CAAC;YAE9B,IAAI,OAAO,SAAS,KAAK,QAAQ,EAAE;gBACjC,IAAI,CAAC,QAAQ,IAAI,mBAAmB,CAAC;gBACrC,IAAI,CAAC,WAAW,GAAG,uCAAuC,CAAC;gBAC3D,IAAI,CAAC,SAAS,IAAI,QAAQ,CAAC;aAC5B;iBAAM;gBACL,IAAI,CAAC,QAAQ,IAAI,yBAAyB,CAAC;gBAC3C,IAAI,CAAC,WAAW,GAAG,kDAAkD,CAAC;gBACtE,IAAI,CAAC,SAAS,IAAI,OAAO,CAAC;aAC3B;SACF;QAED,mCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,eACX,iCAAiC,EAAE,inBAY/B,IAAI,CAAC,WAAW,6SAQvB,CAAC;YACJ,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA;;ICzDM,IAAM,sBAAsB,GAAiB;QAChD,UAAU,EAAEC,mBAAgB;QAC5B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAC,EAAwB;gBAAvB,kBAAM,EAAE,gBAAK,EAAE,oBAAO;YAC3B,IAAA,oBAAK,CAAqC;YAC1C,IAAA,uBAAO,EAAE,2BAAS,EAAE,qBAAM,CAAyC;YAC1E,IAAM,aAAa,GAAG,OAAwB,CAAC;YAE/C,IAAM,OAAO,GAAG,IAAI,aAAa,CAAE,KAAkB,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;YAClE,IAAA,sFACiE,EADhE,eAAO,EAAE,eACuD,CAAC;YACxE,IAAM,WAAW,GAAG;gBACd,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,EAAC;gBAClC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,EAAC;gBAClC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,EAAC;gBAC5C,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,EAAC;aAC7C,CAAC;YAEN,IAAI,OAAO,SAAS,KAAK,QAAQ,EAAE;gBACjC,WAAW,CAAC,IAAI,CACZ,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,MAAM,CAAC,UAAU,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC,EAAC,CAAC,CAAC;aACzE;iBAAM;gBACL,WAAW,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,SAAS,EAAC,CAAC,CAAC;aACtD;YAED,IAAM,MAAM,GAAG,aAAa,CAAC,gBAAgB,CACzC,OAAO,EAAE,CAAC,KAAK,CAAC,EAAE,KAAK,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;YAChD,OAAO,MAAM,CAAC;SAChB;KACF;;ICpDF;;;;;;;;;;;;;;;;IAsBO,IAAM,KAAK,GACd,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,KAAK,EAAE,aAAa,EAAE,YAAY,EAAC,CAAC,CAAC;IAEvE,IAAM,WAAW,GAAiB;QACvC,UAAU,EAAEC,QAAK;QACjB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,KAAK;KAClB;;IC7BD;;;;;;;;;;;;;;;;IAuBA;QAcE,iCACI,aAAuB,EAAE,QAAgB,EAAE,WAAmB,EAC9D,WAAmB,EAAE,OAAiB,EAAE,KAAe,EACvD,WAAqB;YAhBzB,kBAAa,GAAG,CAAC,SAAS,EAAE,SAAS,CAAC,CAAC;YAMvC,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAIrD,WAAM,GAAG,IAAI,CAAC;YAOZ,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,IAAI,GAAG,WAAW,CAAC;YACxB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,aAAa,CAAC,CAAC;;YAExD,IAAI,CAAC,QAAQ;gBACT,eAAe,CAAC,IAAI,CAAC,cAAc,EAAE,aAAa,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC5E,IAAI,CAAC,sBAAsB,GAAG,QAAQ,GAAG,CAAC,CAAC;YAC3C,IAAI,CAAC,SAAS,GAAG,aAAW,WAAW,SAAI,WAAW,SAClD,IAAI,CAAC,sBAAsB,SAAI,WAAa,CAAC;YACjD,IAAM,WAAW,GAAG,iBAAiB,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;YACtD,IAAI,CAAC,QAAQ,GAAG,8BAA4B,WAAW,iBAAc,CAAC;YACtE,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;SAChC;QAED,6CAAW,GAAX;YACE,IAAI,aAAa,GAAG,EAAE,CAAC;YACvB,IAAI,IAAI,CAAC,WAAW,KAAK,CAAC,EAAE;gBAC1B,aAAa,GAAG,WAAW,CAAC;aAC7B;iBAAM,IAAI,IAAI,CAAC,WAAW,KAAK,CAAC,EAAE;gBACjC,aAAa,GAAG,cAAc,CAAC;aAChC;YACD,IAAM,cAAc,GAAG,gBAAc,aAAa,MAAG,CAAC;YAEtD,IAAM,YAAY,GAAG,IAAI,CAAC,sBAAsB,GAAG,qBAAqB;gBACrB,kBAAkB,CAAC;YAEtE,IAAI,aAAa,GAAG,EAAE,CAAC;YACvB,IAAI,eAAe,GAAG,EAAE,CAAC;YACzB,IAAI,6BAA6B,GAAG,EAAE,CAAC;YACvC,IAAI,IAAI,CAAC,WAAW,KAAK,CAAC,EAAE;gBAC1B,aAAa,GAAG,WAAW,CAAC;gBAC5B,eAAe,GAAG,gBAAgB,CAAC;gBACnC,6BAA6B,GAAG,wGAI/B,CAAC;aACH;iBAAM,IAAI,IAAI,CAAC,WAAW,KAAK,CAAC,EAAE;gBACjC,aAAa,GAAG,sBAAsB,CAAC;gBACvC,eAAe,GAAG,sCAAsC,CAAC;gBACzD,6BAA6B,GAAG,uOAM/B,CAAC;aACH;YACD,IAAM,cAAc,GAAG,gBAAc,aAAa,MAAG,CAAC;;;YAItD,IAAM,gBAAgB,GAAG,IAAI,CAAC,IAAI,KAAK,OAAO;gBAC1C,oDAAoD;gBACpD,kXAUF,CAAC;YAEH,IAAM,QAAQ,GAAG,WACf,6BAA6B,kBAE3B,iCAAiC,EAAE,0PAMD,cAAc,yEACM,YAAY,oDAE5C,cAAc,8DACS,eAAe,uBAEzD,gBAAgB,yBAEnB,CAAC;YACL,OAAO,QAAQ,CAAC;SACjB;sCACF;KAAA;;IChID;;;;;;;;;;;;;;;;aAyBgB,S
AAS,CAAC,IAIzB;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,wBAAO,EAAE,wBAAO,CAAW;QAC3B,IAAA,mBAAK,CAAU;QAEhB,IAAA,6DACmD,EADlD,wBAAS,EAAE,0BAAU,EAAE,wBAAS,EAAE,oBAAO,EAAE,0BACO,CAAC;QAE1D,IAAM,YAAY,GAAG,CAAC,UAAU,GAAG,SAAS,EAAE,SAAS,CAAC,CAAC;QAEzD,IAAI,UAAU,KAAK,CAAC,EAAE;YACpB,OAAO,OAAO,CAAC,cAAc,CAAC,KAAK,EAAE,OAAO,CAAC,KAAK,CAAC,CAAC;SACrD;QAED,IAAM,cAAc,GAAG,OAAO,CAC1B,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,UAAU,EAAE,SAAS,CAAC,EAAC,EAAC,CAAC,CAAC;QAC9E,IAAM,QAAQ,GAAG,OAAO,CACpB,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,UAAU,EAAE,SAAS,CAAC,EAAC,EAAC,CAAC,CAAC;QAE9E,IAAM,IAAI,GAAG,QAAQ,CAAC,KAAK,CAAC;QAC5B,IAAM,MAAM,GACR,IAAI,CAAC,EAAC,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,YAAY,EAAE,KAAK,EAAE,CAAC,EAAE,KAAK,EAAE,IAAI,EAAC,EAAC,CAAC,CAAC;QACzE,IAAM,IAAI,GAAGxG,OAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC;QAChD,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,OAAO,EAAC;YAClE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,IAAI,CAAC,EAAC;SAC9B,CAAC;QACF,IAAM,OAAO,GAAG,IAAI,uBAAuB,CACvC,QAAQ,CAAC,KAAK,EAAE,SAAS,EAAE,cAAc,CAAC,KAAK,CAAC,MAAM,EACtD,QAAQ,CAAC,KAAK,CAAC,MAAM,EAAE,OAAO,EAAE,YAAY,EAAE,IAAI,CAAC,CAAC;QACxD,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAChC,OAAO,EAAE,CAAC,QAAQ,EAAE,cAAc,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,CAAC,CAAC;QAEpE,IAAM,QAAQ,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,OAAA,EAAC,EAAC,CAAC,CAAC;QAEtE,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;QAC3C,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;QACrC,OAAO,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;QAEhC,OAAO,QAAQ,CAAC;IAClB,CAAC;IAEM,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEyG,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,SAA6B;KAC1C;;IC3ED;;;;;;;;;;;;;;;;IAsBA;QAWE,uBAAY,KAAa,EAAE,KAAe,EAAE,IAAY;YAVxD,kBAAa,GAAG,CAAC,GAAG,EAAE,GAAG,EAAE,GAAG,CAAC,CAAC;YAKhC,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YAGrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAE/D,IAAI,CAAC,KAAK,GAAG,KAAK,CAAC;YACnB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;YACjB,IAAI,CAAC,SAAS,GAAG,QAAQ,CAAC;SAC3B;QAED,mCAAW,GAAX;;YAEE,IAAI,OAAO,CAAC;YACZ,IAAI,QAAQ,CAAC;YACb,IAAI,IAAI,CAAC,IAAI,GAAG,CAAC,EAAE;gBACjB,MAAM,KAAK,CAAC,oBAAkB,IAAI,CAAC,IAAI,0BAAuB,CAAC,CAAC;aACjE;YAED,IAAI,IAAI,CAAC,IAAI,KAAK,CAAC,EAAE;gBACnB,QAAQ,GAAG,OAAO,CAAC;gBACnB,OAAO,GAAG,OAAO,CAAC;aACnB;iBAAM;gBACL,IAAM,aAAa,GAAG,CAAC,SAAS,EAAE,SAAS,EAAE,SAAS,EAAE,SAAS,CAAC,CAAC;gBACnE,IAAM,UAAU,GAAG,EAAE,CAAC;gBACtB,IAAM,WAAW,GAAG,EAAE,CAAC;gBACvB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;oBAChD,WAAW,CAAC,IAAI,CAAC,KAAG,aAAa,CAAC,CAAC,CAAG,CAAC,CAAC;oBACxC,IAAI,CAAC,GAAG,IAAI,CAAC,KAAK,EAAE;wBAClB,UAAU,CAAC,IAAI,CAAC,KAAG,aAAa,CAAC,CAAC,CAAG,CAAC,CAAC;qBACxC;iBACF;gBACD,OAAO,GAAG,UAAU,CAAC,IAAI,EAAE,CAAC;gBAC5B,QAAQ,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC;aAC/B;YAED,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,4HAGf,OAAO,mFAEQ,QAAQ,0EAER,QAAQ,+CAI9C,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;4BACF;KAAA;;ICpFD;;;;;;;;;;;;;;;;aAsBgB,MAAM,CAAC,IAAoD;QAElE,IAAA,oBAAM,EAAE,sBAAO,CAAS;QACxB,IAAA,4BAAS,EAAE,YAAC,EAAE,YAAC,CAAW;QAEjC,IAAM,OAAO,GACT,IAAI,aAAa,CAAC,SAAS,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;QACvE,OAAO,OAAO,CAAC,gBAAgB,CAC3B,OAAO,EAAE,CAAC,SAAS,EAAE,CAAC,EAAE,CAAC,CAAC,EA
AEjG,aAAU,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;IAChE,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEkG,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;ICrCD;;;;;;;;;;;;;;;;IAqBO,IAAM,OAAO,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,OAAO,EAAC,CAAC,CAAC;IAE/D,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAEC,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAAO;KACpB;;IC3BD;;;;;;;;;;;;;;;;IAuBO,IAAM,GAAG,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,GAAG,EAAC,CAAC,CAAC;IAEvD,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IC7BD;;;;;;;;;;;;;;;;IAuBO,IAAM,IAAI,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,IAAI,EAAC,CAAC,CAAC;IAEzD,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;IC7BD;;;;;;;;;;;;;;;;IAsBO,IAAM,GAAG,GAAG,gBAAgB,CAAC;QAClC,SAAS,EAAE,YAAY,CAAC,GAAG;QAC3B,aAAa,EAAEC,UAAM;QACrB,eAAe,EAAE,IAAI;KACtB,CAAC,CAAC;IAEI,IAAM,SAAS,GAAiB;QACrC,UAAU,EAAEC,MAAG;QACf,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,GAAG;KAChB;;IChCD;;;;;;;;;;;;;;;;aA4BgB,OAAO,CACnB,IAA0E;QAErE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,sBAAM,CAAW;QACjB,IAAA,eAAG,CAAU;QAEpB,IAAM,IAAI,GAAG/G,OAAI,CAAC,cAAc,CAAC,CAAC,GAAG,CAAC,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC;QAEtD,IAAM,QAAQ,GAAG,GAAG,CAAC;YACnB,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC;YACnB,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,gBAAgB,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAC;SACjD,CAAC,CAAC;QAEH,IAAM,aAAa,GAAGC,eAAY,CAAC,oBAAoB,CAAC,QAAQ,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC;QAE9E,IAAM,iBAAiB,GACnB,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,aAAa,EAAC,EAAC,CAAC,CAAC;QAC7E,IAAM,CAAC,GACH,GAAG,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,iBAAiB,EAAC,EAAE,OAAO,SAAA,EAAC,CAAe,CAAC;QAC5E,IAAM,CAAC,GAAG,GAAG,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAe,CAAC;QACvD,IAAM,MAAM,GACR,GAAG,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,EAAC,EAAC,CAAC,CAAC;QACzE,IAAM,cAAc,GAChB,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,aAAa,EAAC,EAAC,CAAC,CAAC;QAC3E,IAAM,GAAG,GACL,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,cAAc,EAAC,EAAE,OAAO,SAAA,EAAC,CAAe,CAAC;QAExE,OAAO,CAAC,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;QACrC,OAAO,CAAC,WAAW,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC9C,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;QAC9B,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;QAC9B,OAAO,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QACnC,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;QAE3C,OAAO,GAAG,CAAC;IACb,CAAC;IAEM,IAAM,aAAa,GAAiB;QACzC,UAAU,EAAE+G,UAAO;QACnB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,OAA2B;KACxC;;IC9CM,IAAM,cAAc,GAAG,UAAC,IAI9B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,6BAAU,EAAE,yBAAQ,CAAU;QAErChH,OAAI,CAAC,MAAM,CACP,CAAC,CAAC,KAAK,CAAC,MAAM,IAAI,CAAC,EACnB,cAAM,OAAA,wDAAwD;YAC1D,iBAAiB,GAAA,CAAC,CAAC;QAE3B,IAAM,IAAI,GAAG,UAAU,CAAC,MAAM,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,OAAA,CAAC,GAAG,CAAC,GAAA,CAAC,CAAC;QAEhD,IAAM,gBAAgB,GAA4B,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QAC3D,gBAAgB,CAAC,IAAI,OAArB,gBAAgB,WAAS,QAAmC,GAAE;QAC9D,KAAK,IAAI,CAAC,GAAG,CAAC,GAAG,UAAU,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,EAAE,EAAE,CAAC,EAAE;YAC3D,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;SAC/B;QAED,IAAM,SAAS,GAAG,EAAE,CAAC;QAErB,IAAM,OAAO,GAAG,KAAK,CAAC;YACpB,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC;YACX,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,QAAQ,EAAE,gBAAgB,EAAE,aAAa,EAAE,CAAC,EAAC;SACtD,CAAC,CAAC;QAEH,IAAM,mBAAmB,GACrBC,eAAY,CAAC,WAAW,CAAC,OAAO,CAAC,KAAK,EAAE
,UAAU,EAAE,IAAI,EAAE,KAAK,CAAC,CAAC;QAErE,IAAM,iCAAiC,GAAGA,eAAY,CAAC,WAAW,CAC9D,mBAAmB,CAAC,MAAM,EAAE,UAAU,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;QAE1D,IAAM,YAAY,GACdA,eAAY,CAAC,mBAAmB,CAAC,OAAO,CAAC,KAAK,EAAE,UAAU,EAAE,IAAI,EAAE,KAAK,CAAC,CAAC;QAE7E,IAAM,eAAe,GAAG,OAAO,CAC3B,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,mBAAmB,EAAC,EAAC,CAAC,CAAC;QAE1E,IAAM,QAAQ,GAAG,SAAS,CAAC;YACzB,MAAM,EAAE,EAAC,CAAC,EAAE,eAAe,EAAC;YAC5B,OAAO,SAAA;YACP,KAAK,EAAE,EAAC,IAAI,EAAE,iCAAiC,EAAC;SACjD,CAAC,CAAC;QAEH,IAAM,MAAM,GACR,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,YAAY,EAAC,EAAC,CAAC,CAAC;QAE5E,SAAS,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QACxB,SAAS,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;QAChC,SAAS,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAEzB,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QAEtD,OAAO,MAAM,CAAC;IAChB,CAAC,CAAC;IAEK,IAAM,oBAAoB,GAAiB;QAChD,UAAU,EAAEgH,iBAAc;QAC1B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,cAAkC;KAC/C;;ICzFD;;;;;;;;;;;;;;;;IAqBA;QAcE,wBACI,UAAkB,EAAE,QAAgB,EAAE,WAAmB,EACzD,WAAmB,EAAE,OAAiB,EAAE,KAAe,EACvD,gBAAuB;YAhB3B,kBAAa,GAAG,CAAC,SAAS,EAAE,SAAS,EAAE,cAAc,CAAC,CAAC;YAMvD,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,kBAAa,GAAG,CAAC,CAAC;YAClB,SAAI,GAAG,IAAI,CAAC;YASV,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAChC,IAAM,sBAAsB,GAAG,QAAQ,GAAG,CAAC,CAAC;YAC5C,IAAI,CAAC,SAAS;gBACV,aAAW,WAAW,SAAI,WAAW,SAAI,sBAAwB,CAAC;YACtE,IAAM,WAAW,GAAG,iBAAiB,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;YACtD,IAAI,CAAC,QAAQ;gBACT,gDAA8C,WAAW,MAAG,CAAC;YACjE,IAAI,aAAa,GAAG,EAAE,CAAC;YACvB,IAAI,WAAW,KAAK,CAAC,EAAE;gBACrB,aAAa,GAAG,GAAG,CAAC;aACrB;iBAAM,IAAI,WAAW,KAAK,CAAC,EAAE;gBAC5B,aAAa,GAAG,MAAM,CAAC;aACxB;YACD,IAAI,CAAC,cAAc,GAAG,gBAAc,aAAa,MAAG,CAAC;YAErD,IAAI,aAAa,GAAG,EAAE,CAAC;YACvB,IAAI,WAAW,KAAK,CAAC,EAAE;gBACrB,aAAa,GAAG,GAAG,CAAC;aACrB;iBAAM,IAAI,WAAW,KAAK,CAAC,EAAE;gBAC5B,aAAa,GAAG,cAAc,CAAC;aAChC;YACD,IAAI,CAAC,cAAc,GAAG,gBAAc,aAAa,MAAG,CAAC;YAErD,IAAI,CAAC,YAAY;gBACb,sBAAsB,GAAG,qBAAqB,GAAG,kBAAkB,CAAC;SACzE;QAED,oCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,8CAEP,IAAI,CAAC,aAAa,uVAOV,IAAI,CAAC,cAAc,2EAEvD,IAAI,CAAC,YAAY,4EAGjB,IAAI,CAAC,aAAa,kQAI4B,IAAI,CAAC,cAAc,mJAMjE,IAAI,CAAC,aAAa,iTAQlB,CAAC;YACL,OAAO,QAAQ,CAAC;SACjB;6BACF;KAAA;;IC3GD;;;;;;;;;;;;;;;;aAwBgB,aAAa,CAAC,IAI7B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,oCAAa,EAAE,kCAAY,EAAE,kCAAY,CAAW;QACpD,IAAA,+BAAW,CAAU;QAEtB,IAAA,8EACoE,EADnE,wBAAS,EAAE,0BAAU,EAAE,oBAAO,EAAE,0BACmC,CAAC;QAE3E,IAAM,cAAc,GAAG,KAAK,CAAC;QAC7B,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,UAAU,CAAC,EAAC;YACnC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;YAClC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,OAAO,EAAC;SAC/B,CAAC;QACF,IAAM,OAAO,GAAG,IAAI,cAAc,CAC9B,UAAU,EAAE,SAAS,EAAE,aAAa,CAAC,KAAK,CAAC,MAAM,EACjD,YAAY,CAAC,KAAK,CAAC,MAAM,EAAE,OAAO,EAAE,CAAC,UAAU,EAAE,CAAC,CAAC,EAAE,cAAc,CAAC,CAAC;QAEzE,IAAM,GAAG,GAAG,OAAO,CAAC,gBAAgB,CAChC,OAAO,EAAE,CAAC,YAAY,EAAE,aAAa,EAAE,YAAY,CAAC,EAAE,YAAY,CAAC,KAAK,EACxE,WAAW,CAAC,CAAC;QAEjB,IAAM,QAAQ,GACV,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,WAAW,EAAC,EAAC,CAAC,CAAC;QAEtE,OAAO,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;QAChC,OAAO,QAAQ,CAAC;IAClB,CAAC;IAEM,IAAM,mBAAmB,GAAiB;QAC/C,UAAU,EAAEC,gBAAa;QACzB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,aAAiC
;KAC9C;;aCvCe,MAAM,CAClB,IAAwE;QAEnE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,uCAAe,EAAE,iBAAI,CAAU;QAEtC,IAAM,KAAK,GAAGlH,OAAI,CAAC,cAAc,CAAC,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;QACpD,IAAM,UAAU,GAAGC,eAAY,CAAC,gBAAgB,CAAC,CAAC,EAAE,eAAe,EAAE,KAAK,CAAC,CAAC;QAE5E,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAC7B,IAAM,KAAK,GAAG,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QACvC,IAAM,IAAI,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;QAE7B,OAAO,UAAU,CAAC,GAAG,CAAC,UAAA,CAAC;YACrB,IAAM,SAAS,YAAO,IAAI,CAAC,CAAC;YAC5B,SAAS,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;YACrB,IAAM,MAAM,GACR,KAAK,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,OAAA,EAAE,IAAI,EAAE,SAAS,EAAC,EAAC,CAAC,CAAC;YACnE,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;YAClB,OAAO,MAAM,CAAC;SACf,CAAC,CAAC;IACL,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEkH,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;IClDD;;;;;;;;;;;;;;;;IAqBO,IAAM,IAAI,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,IAAI,EAAC,CAAC,CAAC;IAEzD,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;IC3BD;;;;;;;;;;;;;;;;IAsBO,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEC,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,UAAC,EAAiB;gBAAhB,kBAAM,EAAE,oBAAO;YACpB,IAAA,YAAC,CAA2B;YACnC,IAAM,aAAa,GAAG,OAAwB,CAAC;YAC/C,IAAM,OAAO,GAAG,IAAI,cAAc,CAAC,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,MAAM,CAAC,CAAC;YAChE,OAAO,aAAa,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;SAC9D;KACF;;IC/BD;;;;;;;;;;;;;;;;IAuBO,IAAM,iBAAiB,GAAG,gBAAgB,CAAC;QAChD,SAAS,EAAE,YAAY,CAAC,kBAAkB;KAC3C,CAAC,CAAC;IAEI,IAAM,uBAAuB,GAAiB;QACnD,UAAU,EAAEC,oBAAiB;QAC7B,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,iBAAiB;KAC9B;;IC/BD;;;;;;;;;;;;;;;;IAqBA;QAYE,6BAAY,QAAkB;YAX9B,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;;YAOtB,kBAAa,GAAG,CAAC,CAAC;YAClB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC;YAC5B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,EACzD,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;YAEhC,IAAM,KAAK,GAAG,iBAAiB,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;YACzD,IAAI,CAAC,QAAQ,GAAG,aAAW,KAAK,qBAAgB,KAAK,OAAI,CAAC;YAC1D,IAAI,CAAC,SAAS,GAAG,cAAc,CAAC;SACjC;QAED,yCAAW,GAAX;YAAA,iBA4BC;YA3BC,IAAM,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC;YACrC,IAAI,SAAS,GAAG,EAAE,CAAC;YACnB,IAAI,IAAI,KAAK,CAAC,EAAE;gBACd,SAAS,GAAG,4CAA4C,CAAC;aAC1D;iBAAM;gBACL,IAAI,YAAU,GAAG,CAAC,CAAC;gBACnB,SAAS;oBACL,IAAI,CAAC,WAAW;yBACX,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC;wBACR,YAAU,EAAE,CAAC;wBACb,OAAO,KAAI,CAAC,WAAW,CAAC,MAAM,KAAK,CAAC;4BAChC,+BAA6B,CAAC,2BAAsB,CAAC,MAAG;4BACxD,aAAU,YAAU,GAAG,CAAC,8BACpB,CAAC,2BAAsB,CAAC,MAAG,CAAC;qBACrC,CAAC;yBACD,IAAI,CAAC,GAAG,CAAC,CAAC;aACpB;YAED,IAAM,QAAQ,GAAG,cACZ,iCAAiC,EAAE,6IAGF,SAAS,qCAG7C,CAAC;YACH,OAAO,QAAQ,CAAC;SACjB;kCACF;KAAA;;IC1ED;;;;;;;;;;;;;;;;aA0BgB,YAAY,CAAC,IAI5B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QAEjB,IAAA,mBAAK,EACL,eAAG,EACH,uBAAO,EACP,2BAAS,EACT,uBAAO,EACP,iCAAY,EACZ,+BAAW,EACX,qCAAc,CACN;QAEJ,IAAA,yHAY8B,EAXlC,sCAAgB,EAChB,0BAAU,EACV,0BAAU,EACV,wBAAS,EACT,gCAAa,EACb,iBAAa,EACb,aAAS,EACT,qBAIkC,CAAC;QAErC,IAAI,MAAM,CAAC;QAEX,IAAI,UAAU,EAAE;;YAEd,MAAM,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,UAAU,EAAC,EAAC,CAAC,CAAC;SACtE;aAAM,IAAI,SAAS,IAAI,aAAa,EAAE;;YAErCtH,OAAI,CAAC,MAAM,CACP,CAAC,CAAC,KAAK,CAAC,MAAM,IAAI,CAAC,EACnB,cAAM,OAAA,2CAAyC,CAAC,CAAC,KAAK,CAAC,MAAQ,GAAA,CAAC,CAAC
;YAErE,IAAM,IAAI,GAAGW,aAAU,CAAC,eAAe,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,CAAC,CAAC;;YAEhE,IAAM,MAAM,GAAG,KAAK,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,MAAM,EAAE,IAAI,MAAA,EAAC,EAAC,CAAC,CAAC;YAC3E,MAAM;gBACF,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,UAAU,EAAC,EAAC,CAAC,CAAC;YACxE,OAAO,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;SACpC;aAAM;YACL,IAAM,kBAAkB,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YAC3D,IAAI,kBAAkB,EAAE;gBACtB,IAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAe,CAAC;gBACxD,IAAM,IAAI,GAAGD,SAAM,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,MAAM,CAAuB,CAAC;gBACpE,IAAM,YAAY,GACd,mBAAmB,CAAC,gBAAgB,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,CAAC,CAAC;gBAClE,MAAM,GAAG,OAAO,CAAC,cAAc,CAAC,UAAU,EAAE,CAAC,CAAC,KAAK,EAAE,YAAY,CAAC,MAAM,CAAC,CAAC;aAC3E;iBAAM;gBACL,IAAM,OAAO,GAAG,IAAI,mBAAmB,CAAC,gBAAgB,CAAC,CAAC;gBAC1D,IAAM,WAAW,GACb,CAAC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,MAAM,EAAC,EAAE,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAC,CAAC,CAAC;gBACrE,IAAM,YAAY,GACd,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;gBACjE,MAAM,GAAG,OAAO,CACZ,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,YAAY,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,UAAU,EAAC,EAAC,CAAC,CAAC;gBACtE,OAAO,CAAC,WAAW,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;aAC1C;SACF;QAED,OAAO,MAAM,CAAC;IAChB,CAAC;IAEM,IAAM,kBAAkB,GAAiB;QAC9C,UAAU,EAAE6G,eAAY;QACxB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,YAAgC;KAC7C;;aChFe,YAAY,CAAC,IAI5B;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAEpC,IAAA,2BAAS,EACT,+BAAW,EACX,uBAAO,EACP,yBAAQ,EACR,yBAAQ,EACR,qDAAsB,CACd;QACH,IAAA,kBAAI,EAAE,8BAAU,CAAW;QAClC,IAAM,KAAK,GAAG,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,MAAM,CAAiB,CAAC;QAC5D,IAAM,WAAW,GAAG,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,CAAe,CAAC;QAEhE,IAAA,oIAEqB,EAFpB,cAAM,EAAE,oBAEY,CAAC;QAC5B,OAAO;YACL,OAAO,CAAC,cAAc,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,EAAE,QAAQ,EAAE,MAAM,CAAC;YACzD,OAAO,CAAC,cAAc,CAAC,UAAU,CAAC,KAAK,EAAE,OAAO,EAAE,YAAY,CAAC;SAChE,CAAC;IACJ,CAAC;IAEM,IAAM,kBAAkB,GAAiB;QAC9C,UAAU,EAAEC,eAAY;QACxB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,YAAgC;KAC7C;;ICrDD;;;;;;;;;;;;;;;;IAqBO,IAAM,IAAI,GAAG,eAAe,CAAC,EAAC,MAAM,EAAE,WAAW,CAAC,IAAI,EAAC,CAAC,CAAC;IAEzD,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEC,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAI;KACjB;;IC3BD;;;;;;;;;;;;;;;;IAsBA;QAUE,qBAAY,MAAgB,EAAE,IAAc;YAT5C,kBAAa,GAAG,CAAC,GAAG,CAAC,CAAC;YAKtB,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAIV,IAAM,WAAW,GAAa,IAAI,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YACvD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBAC3C,WAAW,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;aACtC;YACD,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;YAC/B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC;YACpC,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC;SACzB;QAED,iCAAW,GAAX;YACE,IAAM,YAAY,GAAG,eAAe,CAAC,IAAI,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;YAE7D,IAAM,QAAQ,GAAG,aACb,iCAAiC,EAAE,yIAGF,YAAY,kCAGhD,CAAC;YACF,OAAO,QAAQ,CAAC;SACjB;0BACF;KAAA,IAAA;IAED,SAAS,eAAe,CAAC,IAAY,EAAE,aAAkB;QAAlB,8BAAA,EAAA,kBAAkB;QACvD,IAAI,IAAI,IAAI,CAAC,EAAE;YACb,MAAM,KAAK,CAAC,mBAAiB,IAAI,0BAAuB,CAAC,CAAC;SAC3D;QACD,IAAI,IAAI,KAAK,CAAC,EAAE;YACd,OAAO,cAAY,aAAa,YAAS,CAAC;SAC3C;QAED,IAAM,aAAa,GAAG,CAAC,SAAS,EAAE,SAAS,EAAE,SAAS,EAAE,SAAS,CAAC,CAAC;QACnE,IAAM,YAAY,GAAG,EAAE,CAAC;QACxB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC
,EAAE,EAAE;YAC7B,YAAY,CAAC,IAAI,CAAC,MAAI,aAAa,CAAC,CAAC,CAAC,WAAM,aAAa,eAAU,CAAC,OAAI,CAAC,CAAC;SAC3E;QACD,OAAO,YAAY,CAAC,IAAI,EAAE,CAAC;IAC7B;;IC1EA;;;;;;;;;;;;;;;;aAuBgB,IAAI,CAChB,MAAsE;QAEjE,IAAA,sBAAM,EAAE,wBAAO,EAAE,oBAAK,CAAW;QACjC,IAAA,YAAC,CAAW;QACZ,IAAA,iBAAI,CAAU;;QAGrB,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,KAAK,QAAQ;YACvD,CAAC,CAAC,KAAK,CAAC,MAAM,IAAI,CAAC,EAAE;;;YAGvB,IAAM,IAAI,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YACxC,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,KAAK,QAAQ;gBAC7B,IAAqB,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAAzH,OAAI,CAAC,YAAY,CAAC,CAAC,CAAC,GAAA,CAAC;gBACrD,IAAkB,CAAC;YACvB,IAAM,GAAG,GAAGU,SAAM,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;YAC5C,IAAM,MAAM,GAAG,WAAW,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC;YACtC,OAAO,OAAO,CAAC,cAAc,CAAC,MAAM,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC;SAC1E;QAED,IAAM,OAAO,GAAG,IAAI,WAAW,CAAC,CAAC,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC;QAC/C,IAAM,MAAM,GAAG,OAAO,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,CAAC,CAAC;QAE/D,OAAO,MAAM,CAAC;IAChB,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAEgH,OAAI;QAChB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICtDD;;;;;;;;;;;;;;;;IAqBA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IAEA;QAUE,qBAAY,KAAe;YAL3B,kBAAa,GAAG,CAAC,GAAG,EAAE,SAAS,CAAC,CAAC;YAEjC,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACtD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,QAAQ,GAAG,qFACU,CAAC;YAC3B,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC;SACzB;QAED,iCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,eACX,iCAAiC,EAAE,y9EAkEtC,CAAC;YACJ,OAAO,QAAQ,CAAC;SACjB;0BACF;KAAA,IAAA;IAED;QAUE,sBAAY,KAAe;YAL3B,kBAAa,GAAG,CAAC,GAAG,EAAE,SAAS,CAAC,CAAC;YAEjC,kBAAa,GAA6B,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACtD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,KAAK,CAAC;YACzB,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;;;;;YAK/D,IAAI,CAAC,QAAQ,GAAG,4CAA4C,CAAC;YAC7D,IAAI,CAAC,SAAS,GAAG,OAAO,CAAC;SAC1B;QAED,kCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,eACX,iCAAiC,EAAE,s3EA0DtC,CAAC;YACJ,OAAO,QAAQ,CAAC;SACjB;2BACF;KAAA;;ICvLD,SAAS,mCAAmC,CACxC,OAAsB,EAAE,UAAsB;QAChD,IAAI,UAAU,KAAK,IAAI,EAAE;YACvB,OAAO,CAAC,WAAW,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;SACxC;IACH,CAAC;IAED,SAAS,aAAa,CAAC,GAAW;QAChC,IAAI,IAAI,GAAG,CAAC,CAAC;QACb,OAAO,IAAI,GAAG,GAAG,EAAE;YACjB,IAAI,IAAI,CAAC,CAAC;SACX;QACD,OAAO,IAAI,CAAC;IACd,CAAC;IAED;IACA;aACgB,IAAI,CAChB,IAAoE;QAE/D,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,YAAC,CAAW;QACZ,IAAA,WAAC,EAAE,qBAAM,CAAS;QAEzB,IAAM,MAAM,GAAG,CAAC,CAAC,KAAK,CAAC;QACvB,IAAM,OAAO,GAAG,MAAM,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;QAE1C,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE;YACnC,IAAM,KAAK,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAe,CAAC;YACjD,IAAA,8DAC+D,EAD9D,mBAAW,EAAE,sBACiD,CAAC;YAEtE,OAAO;gBACL,OAAO,CAAC,cAAc,CAClB,WAAW,CAAC,KAAK,EAAE,WAAW,CAAC,KAAK,EAAE,WAAW,CAAC,MAAM,CAAC;gBAC7D,OAAO,CAAC,cAAc,CAClB,cAAc,CAAC,KAAK,EAAE,cAAc,CAAC,KAAK,EAAE,cAAc,CAAC,MAAM,CAAC;aACvE,CAAC;SACH;QAED,IAAI,CAAC,KAAK,CAAC,EAAE;YACX,MAAM,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC;YAC9B,OAAO;gBACL,OAAO,CAAC,cAAc,CAAC,MAAM,EAAE,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC;gBAC3C,OAAO,CAAC,cAAc,CAAC,MAAM,EAAE,OAAO,EAAE,EAAE,CAAC;aAC5C,CAAC;SACH;QAED,IAAI,OAAO,KAAK,CAAC,kBAAkB;YACjC,OAAO;gBACL,CAAC,EAAE,IAAI,CAAC,EAAC,KAAK,EAAE,EAAC,KAAK,EAAE,MAAM,E
AAE,KAAK,EAAE,OAAO,EAAE,KAAK,EAAE,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC;aACrE,CAAC;SACH;;QAGD,IAAM,KAAK,GAAG1H,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;QACzC,IAAM,KAAK,GAAG,KAAK,GAAG,OAAO,CAAC;QAC9B,IAAM,GAAG,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,KAAK,EAAE,OAAO,CAAC,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;QAE9E,IAAM,KAAK,GAAG,aAAa,CAAC,CAAC,CAAC,CAAC;QAC/B,IAAM,WAAW,GAAG,aAAa,CAAC,OAAO,CAAC,CAAC;;;;;QAM3C,IAAI,OAAO,GAAe,IAAI,CAAC;;;;QAK/B,IAAM,SAAS,GAAG,cAAM,OAAA,OAAO,KAAK,IAAI,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,GAAG,CAAC,GAAG,EAAE,OAAO,CAAC,GAAA,CAAC;QAEvE,IAAM,OAAO,GAAG,UAAC,GAAW,EAAE,GAAW,EAAE,KAAe;YACxD,IAAM,MAAM,GAAG,SAAS,EAAE,CAAC;YAC3B,IAAM,OAAO,GAAG,IAAI,WAAW,CAAC,KAAK,CAAC,CAAC;YACvC,IAAM,SAAS,GAAG,OAAO,KAAK,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC;YAC3C,IAAM,eAAe,GAAG;gBACpB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,EAAC;gBAChC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;gBAClC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,MAAM,CAAC,iBAAiB,CAAC,EAAC;gBACnD,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,GAAG,CAAC,EAAC;gBAC5B,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,GAAG,CAAC,EAAC;aAC/B,CAAC;YACF,IAAM,WAAW,GAAG,OAAO,CAAC;YAC5B,OAAO,GAAG,OAAO,CAAC,gBAAgB,CAC9B,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC;YAC/C,mCAAmC,CAAC,OAAO,EAAE,WAAW,CAAC,CAAC;SAC3D,CAAC;;QAGF,KAAK,IAAI,GAAG,GAAG,CAAC,EAAE,GAAG,GAAG,KAAK,EAAE,GAAG,IAAI,CAAC,EAAE;YACvC,IAAM,GAAG,GAAG,GAAG,GAAG,CAAC,CAAC;YACpB,KAAK,IAAI,GAAG,GAAG,GAAG,EAAE,GAAG,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC,EAAE;gBACtC,OAAO,CAAC,GAAG,EAAE,GAAG,EAAE,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC,CAAC;aACzC;SACF;;QAGD,KAAK,IAAI,WAAW,GAAG,WAAW,EAAE,WAAW,GAAG,KAAK,EAAE,WAAW,IAAI,CAAC,EAAE;YACzE,IAAM,QAAM,GAAG,SAAS,EAAE,CAAC;YAC3B,IAAM,YAAY,GAAG,IAAI,YAAY,CAAC,CAAC,KAAK,EAAE,WAAW,GAAG,CAAC,CAAC,CAAC,CAAC;YAChE,IAAM,SAAS,GAAG,OAAO,KAAK,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC;YAC3C,IAAM,gBAAgB,GAAG;gBACrB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,EAAC;gBAChC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;gBAClC,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,KAAK,CAAC,EAAC;aACjC,CAAC;YACF,IAAM,aAAW,GAAG,OAAO,CAAC;YAC5B,OAAO,GAAG,OAAO,CAAC,gBAAgB,CAC9B,YAAY,EAAE,QAAM,EAAE,OAAO,EAAE,gBAAgB,CAAC,CAAC;YACrD,mCAAmC,CAAC,OAAO,EAAE,aAAW,CAAC,CAAC;;YAG1D,IAAM,GAAG,GAAG,KAAK,GAAG,CAAC,CAAC;YACtB,IAAM,GAAG,GAAG,GAAG,GAAG,CAAC,CAAC;YACpB,KAAK,IAAI,GAAG,GAAG,GAAG,EAAE,GAAG,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC,EAAE;gBACtC,OAAO,CAAC,GAAG,EAAE,GAAG,EAAE,OAAO,CAAC,KAAK,CAAC,CAAC;aAClC;SACF;;QAGD,IAAI,WAAW,GAAG,OAAO,CAAC;QAC1B,OAAO,GAAG,KAAK,CACX,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,CAAC,KAAK,EAAE,CAAC,CAAC,EAAC,EAAC,CAAC,CAAC;QAC1E,mCAAmC,CAAC,OAAO,EAAE,WAAW,CAAC,CAAC;;QAG1D,IAAI,MAAM,GAAG,QAAQ,CACjB,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,GAAG,EAAE,OAAO,SAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,IAAI,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAC,EAAC,CAAC,CAAC;QAC1E,mCAAmC,CAAC,OAAO,EAAE,GAAG,CAAC,CAAC;;;QAIlD,IAAM,QAAQ,GAAG,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QACrC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QAEjB,WAAW,GAAG,OAAO,CAAC;QACtB,OAAO,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,OAAO,EAAC,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;QAC7E,mCAAmC,CAAC,OAAO,EAAE,WAAW,CAAC,CAAC;QAE1D,IAAM,UAAU,GAAG,MAAM,CAAC;QAC1B,MAAM,GAAG,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,SAAA,EAAC,CAAC,CAAC;QAC3E,mCAAmC,CAAC,OAAO,EAAE,UAAU,CAAC,CAAC;QAEzD,OAAO,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAC3B,CAAC;IAEM,IAAM,UAAU,GAAiB;QACtC,UAAU,EAAE2H,OAAI;QAChB,WAAW,
EAAE,QAAQ;QACrB,UAAU,EAAE,IAAwB;KACrC;;ICnLD;;;;;;;;;;;;;;;;IAqBA;QAUE,0BAAY,QAA0C;YATtD,kBAAa,GAAG,CAAC,OAAO,EAAE,YAAY,CAAC,CAAC;YAExC,aAAQ,GAAG,+DAA+D,CAAC;YAI3E,kBAAa,GAA6B,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;YACrD,SAAI,GAAG,IAAI,CAAC;YAGV,IAAI,CAAC,WAAW,GAAG,QAAQ,CAAC;YAC5B,IAAI,CAAC,cAAc,GAAG,kBAAkB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC3D,IAAI,CAAC,QAAQ,GAAG,eAAe,CAC3B,IAAI,CAAC,cAAc,EAAE,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/D,IAAI,CAAC,SAAS,GAAG,WAAW,CAAC;SAC9B;QAED,sCAAW,GAAX;YACE,IAAM,QAAQ,GAAG,05EAgET,iCAAiC,EAAE,g2EAoDtC,CAAC;YACN,OAAO,QAAQ,CAAC;SACjB;+BACF;KAAA;;aCzIe,SAAS,CAAC,IAIzB;QACQ,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,oBAAK,EAAE,8BAAU,CAAW;QAC5B,IAAA,mCAAa,EAAE,yBAAQ,EAAE,2BAAS,EAAE,+BAAW,CAAU;QAE1D,IAAA,2BAA2D,EAA1D,aAAK,EAAE,mBAAW,EAAE,kBAAU,EAAE,mBAA0B,CAAC;QAC5D,IAAA,6EAC2D,EAD1D,iBAAS,EAAE,gBAC+C,CAAC;QAClE,IAAM,QAAQ,GACV,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ;YAC1B,WAAW,CAAqC,CAAC;QAEtD,IAAM,OAAO,GAAG,IAAI,gBAAgB,CAAC,QAAQ,CAAC,CAAC;QAC/C,IAAM,mBAAmB,GAAG,aAAa,KAAK,SAAS,GAAG,CAAC,GAAG,CAAC,CAAC;QAChE,IAAI,UAAkB,CAAC;QACvB,QAAQ,QAAQ;YACd,KAAK,UAAU;gBACb,UAAU,GAAG,CAAC,CAAC;gBACf,MAAM;YACR,KAAK,SAAS;gBACZ,UAAU,GAAG,CAAC,CAAC;gBACf,MAAM;YACR,KAAK,MAAM;gBACT,UAAU,GAAG,CAAC,CAAC;gBACf,MAAM;YACR,KAAK,SAAS;gBACZ,UAAU,GAAG,CAAC,CAAC;gBACf,MAAM;YACR;gBACE,UAAU,GAAG,CAAC,CAAC;gBACf,MAAM;SACT;QACD,IAAM,WAAW,GAAG;YAClB,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,mBAAmB,CAAC,EAAC;YAC5C,EAAC,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC,UAAU,CAAC,EAAC,EAAE,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,SAAS,CAAC,EAAC;SAC1E,CAAC;QACF,OAAO,OAAO,CAAC,gBAAgB,CAC3B,OAAO,EAAE,CAAC,KAAK,EAAE,UAAU,CAAC,EAAE,SAAS,EAAE,WAAW,CAAC,CAAC;IAC5D,CAAC;IAEM,IAAM,eAAe,GAAiB;QAC3C,UAAU,EAAEC,YAAS;QACrB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,SAA6B;KAC1C;;ICtED;;;;;;;;;;;;;;;;aAwBgB,MAAM,CAClB,IACsE;QAEjE,IAAA,oBAAM,EAAE,sBAAO,EAAE,kBAAK,CAAS;QAC/B,IAAA,oBAAK,CAAW;QAClB,IAAA,iBAAI,CAAU;QAEnB,IAAI,IAAI,GAAG,CAAC,EAAE;YACZ,IAAI,IAAI,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC;SAC5B;QAED,IAAM,CAAC,GAAG,KAAK,CAAC;QAChB,IAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;QAE7B,IAAM,GAAG,GAAG,KAAK,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;QAC9B,IAAM,QAAQ,GAAa,IAAI,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;QAChD,IAAI,QAAQ,GAAG,CAAC,CAAC;QACjB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,KAAK,EAAE,CAAC,EAAE,EAAE;YAC9B,IAAI,CAAC,KAAK,IAAI,EAAE;gBACd,QAAQ,CAAC,QAAQ,EAAE,CAAC,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;aACnC;SACF;QAED,IAAM,SAAS,GAAG,EAAE,CAAC;QAErB,IAAM,KAAK,GAAG,IAAI,KAAK,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QACvC,IAAM,IAAI,GAAG,CAAC,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;QAC7B,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;QACf,IAAM,GAAG,GAAiB,IAAI,KAAK,CAAC,GAAG,CAAC,CAAC;QACzC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;YACnC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;YAChB,IAAM,MAAM,GAAG,KAAK,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,GAAA,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,OAAA,EAAE,IAAI,MAAA,EAAC,EAAC,CAAC,CAAC;YACnE,IAAM,QAAQ,GACV,OAAO,CAAC,EAAC,MAAM,EAAE,EAAC,CAAC,EAAE,MAAM,EAAC,EAAE,OAAO,SAAA,EAAE,KAAK,EAAE,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAC,CAAC,CAAC;YACtE,GAAG,CAAC,CAAC,CAAC,GAAG,QAAQ,CAAC;YAElB,SAAS,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;SACxB;QAED,SAAS,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,GAAA,CAAC,CAAC;QACtD,OAAO,GAAG,CAAC;IACb,CAAC;IAEM,IAAM,YAAY,GAAiB;QACxC,UAAU,EAAEC,SAAM;QAClB,WAAW,EAAE,QAAQ;QACrB,UAAU,EAAE,MAA0B;KACvC;;;ICgDD;IACA,IAAM,aAAa,GAAmB;QACpC,kBAAkB;QAClB,SAAS;QACT,SAAS;QACT,UAAU;QACV,YAAY;QACZ,YAAY;QACZ,aAAa;QACb,iBAAiB;QACjB,oBAAoB;QACpB,UAAU;QACV,UAAU;QACV,iBAAiB;QACjB,aAAa;QACb,YAAY;QACZ,YAAY;QA
CZ,yBAAyB;QACzB,SAAS;QACT,UAAU;QACV,mBAAmB;QACnB,aAAa;QACb,YAAY;QACZ,kBAAkB;QAClB,2BAA2B;QAC3B,YAAY;QACZ,SAAS;QACT,WAAW;QACX,SAAS;QACT,gBAAgB;QAChB,WAAW;QACX,UAAU;QACV,mBAAmB;QACnB,gBAAgB;QAChB,WAAW;QACX,cAAc;QACd,oBAAoB;QACpB,iBAAiB;QACjB,0BAA0B;QAC1B,cAAc;QACd,cAAc;QACd,aAAa;QACb,kBAAkB;QAClB,cAAc;QACd,UAAU;QACV,eAAe;QACf,UAAU;QACV,eAAe;QACf,SAAS;QACT,gBAAgB;QAChB,gBAAgB;QAChB,SAAS;QACT,aAAa;QACb,aAAa;QACb,UAAU;QACV,SAAS;QACT,aAAa;QACb,eAAe;QACf,cAAc;QACd,SAAS;QACT,yBAAyB;QACzB,yBAAyB;QACzB,cAAc;QACd,cAAc;QACd,UAAU;QACV,WAAW;QACX,SAAS;QACT,WAAW;QACX,UAAU;QACV,WAAW;QACX,UAAU;QACV,aAAa;QACb,UAAU;QACV,WAAW;QACX,aAAa;QACb,oBAAoB;QACpB,2BAA2B;QAC3B,sBAAsB;QACtB,WAAW;QACX,eAAe;QACf,YAAY;QACZ,aAAa;QACb,SAAS;QACT,UAAU;QACV,WAAW;QACX,kBAAkB;QAClB,kBAAkB;QAClB,aAAa;QACb,oBAAoB;QACpB,mBAAmB;QACnB,YAAY;QACZ,UAAU;QACV,YAAY;QACZ,uBAAuB;QACvB,SAAS;QACT,SAAS;QACT,UAAU;QACV,UAAU;QACV,UAAU;QACV,eAAe;QACf,eAAe;QACf,YAAY;QACZ,eAAe;KAChB,CAAC;;QAEF,KAA2B,IAAA,kBAAAzH,SAAA,aAAa,CAAA,4CAAA,uEAAE;YAArC,IAAM,YAAY,0BAAA;YACrB0H,iBAAc,CAAC,YAAY,CAAC,CAAC;SAC9B;;;;;;;;;;ICnOD;;;;;;;;;;;;;;;;IAiBA;QASE,uBAAoB,MAAiB;YAAjB,WAAM,GAAN,MAAM,CAAW;YAR7B,mBAAc,GAAG,CAAC,CAAC;YACnB,mBAAc,GAAG,CAAC,CAAC;YACnB,gBAAW,GAA6B,IAAI,GAAG,EAAE,CAAC;YAClD,gBAAW,GAA6B,IAAI,GAAG,EAAE,CAAC;YAEnD,iBAAY,GAAG,CAAC,CAAC;YACjB,sBAAiB,GAAG,CAAC,CAAC;SAEY;QAEzC,2CAAmB,GAAnB,UAAoB,QAAgB,EAAE,KAA0B;YAC9D,OAAO,IAAI,CAAC,aAAa,CAAC,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;SAClD;QAED,qCAAa,GAAb,UACI,QAAgB,EAAE,KAA0B,EAAE,gBAAwB;YAAxB,iCAAA,EAAA,wBAAwB;YACxE,IAAM,GAAG,GAAG,YAAY,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;YAC1C,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;gBAC9B,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;aAC/B;YAED,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;gBAC9B,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;aAC/B;YAED,IAAI,CAAC,YAAY,IAAI,QAAQ,CAAC;YAC9B,IAAI,CAAC,cAAc,EAAE,CAAC;YAEtB,IAAI,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;gBACxC,IAAI,CAAC,cAAc,EAAE,CAAC;gBAEtB,IAAM,WAAS,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,CAAC;gBACpD,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,WAAS,CAAC,CAAC;gBAC1C,OAAO,WAAS,CAAC;aAClB;YAED,IAAI,CAAC,iBAAiB,IAAI,QAAQ,CAAC;YACnC,IAAM,SAAS,GACX,IAAI,CAAC,MAAM,CAAC,YAAY,CAAC,EAAC,gBAAgB,kBAAA,EAAE,IAAI,EAAE,QAAQ,EAAE,KAAK,OAAA,EAAC,CAAC,CAAC;YACxE,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;YAE1C,OAAO,SAAS,CAAC;SAClB;QAED,qCAAa,GAAb,UACI,MAAiB,EAAE,QAAgB,EAAE,KAA0B;YACjE,IAAI,IAAI,CAAC,WAAW,CAAC,IAAI,KAAK,CAAC,EAAE;gBAC/B,OAAO;aACR;YAED,IAAM,GAAG,GAAG,YAAY,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;YAC1C,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;gBAC9B,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;aAC/B;YAED,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YACvC,IAAI,CAAC,cAAc,EAAE,CAAC;YACtB,IAAI,CAAC,cAAc,EAAE,CAAC;YAEtB,IAAM,UAAU,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;YAC7C,IAAM,WAAW,GAAG,UAAU,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;YAC/C,IAAI,WAAW,GAAG,CAAC,EAAE;gBACnB,MAAM,IAAI,KAAK,CACX,0DAA0D;oBAC1D,gBAAgB,CAAC,CAAC;aACvB;YACD,UAAU,CAAC,MAAM,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC;YAClC,IAAI,CAAC,YAAY,IAAI,QAAQ,CAAC;SAC/B;QAED,2CAAmB,GAAnB,UACI,MAAiB,EAAE,QAAgB,EAAE,KAA0B;YADnE,iBAUC;YARC,MAAM,CAAC,QAAQ,CAAC,UAAU,CAAC,KAAK,CAAC;iBAC5B,IAAI,CACD;gBACE,KAAI,CAAC,aAAa,CAAC,MAAM,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC;aAC7C,EACD,UAAC,GAAG;;aAEH,CAAC,CAAC;SACZ;QAED,yCAAiB,GAAjB;YACE,OAAO,IAAI,CAAC,cAAc,CAAC;SAC5B;QAED,yCAAiB,GAAjB;YACE,OAAO,IAAI,CAAC,cAAc,CAAC;SAC5B;QAED,+BAAO,GAAP;YACE,IAAI,CAA
C,WAAW,CAAC,OAAO,CAAC,UAAC,OAAO,EAAE,GAAG;gBACpC,OAAO,CAAC,OAAO,CAAC,UAAA,IAAI;oBAClB,IAAI,CAAC,OAAO,EAAE,CAAC;iBAChB,CAAC,CAAC;aACJ,CAAC,CAAC;YAEH,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,UAAC,OAAO,EAAE,GAAG;gBACpC,OAAO,CAAC,OAAO,CAAC,UAAA,IAAI;oBAClB,IAAI,CAAC,OAAO,EAAE,CAAC;iBAChB,CAAC,CAAC;aACJ,CAAC,CAAC;YAEH,IAAI,CAAC,WAAW,GAAG,IAAI,GAAG,EAAE,CAAC;YAC7B,IAAI,CAAC,WAAW,GAAG,IAAI,GAAG,EAAE,CAAC;YAC7B,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC;YACxB,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC;YACxB,IAAI,CAAC,YAAY,GAAG,CAAC,CAAC;YACtB,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC;SAC5B;4BACF;KAAA,IAAA;IAED,SAAS,YAAY,CAAC,QAAgB,EAAE,KAA0B;QAChE,OAAU,QAAQ,SAAI,KAAO,CAAC;IAChC;;ICpIA;;;;;;;;;;;;;;;;IAiBA;QASE,wBAAoB,MAAiB;YAAjB,WAAM,GAAN,MAAM,CAAW;YAR7B,oBAAe,GAAG,CAAC,CAAC;YACpB,oBAAe,GAAG,CAAC,CAAC;YACpB,iBAAY,GAA8B,IAAI,GAAG,EAAE,CAAC;YACpD,iBAAY,GAA8B,IAAI,GAAG,EAAE,CAAC;YAErD,iBAAY,GAAG,CAAC,CAAC;YACjB,sBAAiB,GAAG,CAAC,CAAC;SAEY;QAEzC,uCAAc,GAAd,UACI,KAAa,EAAE,MAAc,EAAE,MAAwB,EACvD,KAA2B;YAC7B,IAAM,eAAe,GAAG,kBAAkB,CAAC,MAAM,CAAC,CAAC;YACnD,IAAM,QAAQ,GAAG,KAAK,GAAG,MAAM,GAAG,eAAe,CAAC;YAClD,IAAM,GAAG,GAAG,aAAa,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,KAAK,CAAC,CAAC;YACxD,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;gBAC/B,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;aAChC;YAED,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;gBAC/B,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;aAChC;YAED,IAAI,CAAC,YAAY,IAAI,QAAQ,CAAC;YAC9B,IAAI,CAAC,eAAe,EAAE,CAAC;YAEvB,IAAI,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;gBACzC,IAAI,CAAC,eAAe,EAAE,CAAC;gBAEvB,IAAM,YAAU,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,CAAC;gBACtD,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,YAAU,CAAC,CAAC;gBAC5C,OAAO,YAAU,CAAC;aACnB;YAED,IAAI,CAAC,iBAAiB,IAAI,QAAQ,CAAC;YAEnC,IAAM,UAAU,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC;gBAC3C,IAAI,EAAE,CAAC,KAAK,EAAE,MAAM,CAAC;gBACrB,MAAM,QAAA;gBACN,KAAK,OAAA;aACN,CAAC,CAAC;YACH,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;YAE5C,OAAO,UAAU,CAAC;SACnB;QAED,uCAAc,GAAd,UACI,OAAmB,EAAE,KAAa,EAAE,MAAc,EAClD,MAAwB,EAAE,KAA2B;YACvD,IAAI,IAAI,CAAC,YAAY,CAAC,IAAI,KAAK,CAAC,EAAE;gBAChC,OAAO;aACR;YAED,IAAM,GAAG,GAAG,aAAa,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,KAAK,CAAC,CAAC;YACxD,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE;gBAC/B,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;aAChC;YAED,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YACzC,IAAI,CAAC,eAAe,EAAE,CAAC;YACvB,IAAI,CAAC,eAAe,EAAE,CAAC;YAEvB,IAAM,WAAW,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;YAC/C,IAAM,YAAY,GAAG,WAAW,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;YAClD,IAAI,YAAY,GAAG,CAAC,EAAE;gBACpB,MAAM,IAAI,KAAK,CACX,2DAA2D;oBAC3D,iBAAiB,CAAC,CAAC;aACxB;YACD,WAAW,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,CAAC,CAAC;YACpC,IAAM,eAAe,GAAG,kBAAkB,CAAC,MAAM,CAAC,CAAC;YACnD,IAAM,QAAQ,GAAG,KAAK,GAAG,MAAM,GAAG,eAAe,CAAC;YAClD,IAAI,CAAC,YAAY,IAAI,QAAQ,CAAC;SAC/B;QAED,2CAAkB,GAAlB;YACE,OAAO,IAAI,CAAC,eAAe,CAAC;SAC7B;QAED,2CAAkB,GAAlB;YACE,OAAO,IAAI,CAAC,eAAe,CAAC;SAC7B;QAED,gCAAO,GAAP;YACE,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,UAAC,QAAQ,EAAE,GAAG;gBACtC,QAAQ,CAAC,OAAO,CAAC,UAAA,OAAO;oBACtB,OAAO,CAAC,OAAO,EAAE,CAAC;iBACnB,CAAC,CAAC;aACJ,CAAC,CAAC;YAEH,IAAI,CAAC,YAAY,CAAC,OAAO,CAAC,UAAC,QAAQ,EAAE,GAAG;gBACtC,QAAQ,CAAC,OAAO,CAAC,UAAA,OAAO;oBACtB,OAAO,CAAC,OAAO,EAAE,CAAC;iBACnB,CAAC,CAAC;aACJ,CAAC,CAAC;YAEH,IAAI,CAAC,YAAY,GAAG,IAAI,GAAG,EAAE,CAAC;YAC9B,IAAI,CAAC,YAAY,GAAG,IAAI,GAAG,EAAE,CAAC;YAC9B,IAAI,CAAC,eAAe,GAAG,CAAC,CAAC;YACzB,IAAI,CAAC,eAAe,GAAG,CAAC,CAAC;YACzB,IAAI,CAAC,YAAY,GAAG,CAAC,CAAC;YACtB,
IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC;SAC5B;6BACF;KAAA,IAAA;IAED,SAAS,aAAa,CAClB,KAAa,EAAE,MAAc,EAAE,MAAwB,EACvD,KAA2B;QAC7B,OAAU,KAAK,SAAI,MAAM,SAAI,MAAM,SAAI,KAAO,CAAC;IACjD,CAAC;IAED,SAAS,kBAAkB,CAAC,MAAwB;QAClD,IAAI,MAAM,KAAK,YAAY,EAAE;YAC3B,OAAO,EAAE,CAAC;SACX;aAAM;YACL,MAAM,IAAI,KAAK,CAAI,MAAM,uBAAoB,CAAC,CAAC;SAChD;IACH;;ICpFO,IAAM,aAAa,GACtB,UAAC,MAAiB,EAAE,eAAmC,EACtD,MAA4B,EAAE,MAA0B,EACxD,QAA6B;QAC5B,IAAM,QAAQ,aAAI,MAAM,GAAK,MAAM,CAAC,CAAC;QACrC,IAAI,QAAQ,EAAE;YACZ,QAAQ,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;SACzB;QACD,OAAO,MAAM,CAAC,eAAe,CAAC;YAC5B,MAAM,EAAE,eAAe;YACvB,OAAO,EAAE,QAAQ,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,QAAC,EAAC,OAAO,EAAE,CAAC,EAAE,QAAQ,EAAE,CAAC,EAAC,IAAC,CAAC;SAC7D,CAAC,CAAC;IACL,CAAC,CAAC;IAEC,IAAM,cAAc,GACvB,UAAC,MAAiB,EAAE,OAAsB,EACzC,cAAiC,EACjC,UAA2C,EAAE,MAAkB,EAC/D,WAAmB;QAAnB,4BAAA,EAAA,mBAAmB;QAClB,IAAM,UAAU,GAAG,EAAC,KAAK,EAAE,MAAM,CAAC,KAAK,EAAE,KAAK,EAAE,MAAM,CAAC,KAAK,EAAC,CAAC;QAE9D,IAAM,MAAM,GAAGC,UAA8B,CACzC,UAAU,EAAE,UAAU,EAAE,OAAO,EAAE,WAAW,CAAC,CAAC;QAClD,IAAM,MAAM,GAAG,MAAM,CAAC,kBAAkB,CACpC,EAAC,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,OAAO,CAAC,WAAW,CAAC,IAAI,EAAC,CAAC,CAAC;QACrD,IAAM,QAAQ,GAAG,MAAM,CAAC,qBAAqB,CAAC;YAC5C,MAAM,EAAE,cAAc;YACtB,OAAO,EAAE,EAAC,MAAM,QAAA,EAAE,UAAU,EAAE,MAAM,EAAC;YACrC,KAAK,EAAE,OAAO,CAAC,WAAW,CAAC,IAAI;SAChC,CAAC,CAAC;QAEH,OAAO,QAAQ,CAAC;IAClB,CAAC,CAAC;aAEU,aAAa,CACzB,OAAsB,EAAE,MAA0B,EAAE,KAAoB,EACxE,gBAAqB,EAAE,yBAA8B;QADD,sBAAA,EAAA,UAAoB;QACxE,iCAAA,EAAA,qBAAqB;QAAE,0CAAA,EAAA,8BAA8B;QACvD,IAAM,GAAG,GAAG,OAAO,CAAC,SAAS,GAAG,GAAG;aAC9B,OAAO,CAAC,aAAa,GAAG,OAAO,CAAC,aAAa,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,CAAC;YAC9D,MAAM,CAAC,GAAG,CAAC,UAAA,KAAK,IAAI,OAAA,KAAK,CAAC,MAAM,GAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC;YAC7D,OAAO,CAAC,aAAa,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,gBAAgB;YAClD,yBAAyB,CAAC;QAC9B,OAAO,GAAG,CAAC;IACb;;ICpBA;IACA;IACA,IAAM,0BAA0B,GAC5BhI,MAAG,EAAE,CAAC,SAAS,CAAC,mCAAmC,CAAC,CAAC;IAEzD;IACA,IAAM,eAAe,GACjB,UAAC,MAAiB,EACjB,OAAqC;QACpC,IAAM,uCAAuC,GACzC,MAAM,CAAC,MAAM,CAAC,gCAAgC,CAAC;QACnD,IAAM,MAAM,GAAG,OAAO,CAAC,gBAAgB,CAAC,CAAC;QACzC,IAAM,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC,CAAC;QACrC,IAAI,QAAQ,CAAC,KAAK,CAAC,UAAC,CAAC,IAAK,OAAA,CAAC,IAAI,uCAAuC,GAAA,CAAC,EAAE;YACvE,OAAO,QAAQ,CAAC;SACjB;QAEDC,OAAI,CAAC,MAAM,CACP,QAAQ,CAAC,CAAC,CAAC,GAAG,uCAAuC;YACjD,MAAM,CAAC,CAAC,KAAK,SAAS,IAAI,MAAM,CAAC,CAAC,KAAK,SAAS,EACpD,cAAM,OAAA,0DAA0D,GAAA,CAAC,CAAC;QAEtE,IAAI,eAAe,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;QACxD,IAAI,eAAe,GAAG,uCAAuC,EAAE;YAC7D,eAAe,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YACpDA,OAAI,CAAC,MAAM,CACP,eAAe,IAAI,uCAAuC,EAC1D,cAAM,OAAA,6CAA6C,GAAA,CAAC,CAAC;YACzD,OAAO,CAAC,eAAe,EAAE,eAAe,EAAE,eAAe,CAAC,CAAC;SAC5D;aAAM;YACL,OAAO,CAAC,eAAe,EAAE,eAAe,EAAE,CAAC,CAAC,CAAC;SAC9C;IACH,CAAC,CAAC;IAEN;QAAmCgI,iCAAa;QAoC9C,uBAAY,MAAiB,EAAE,gBAAwB;YAAxB,iCAAA,EAAA,wBAAwB;YAAvD,YACE,iBAAO,SAqCR;YA5DO,0BAAoB,GAAG,IAAI,OAAO,EAAU,CAAC;YAM7C,yBAAmB,GAAa,EAAE,CAAC;YACnC,0BAAoB,GAAiB,EAAE,CAAC;YACxC,0BAAoB,GAAiB,EAAE,CAAC;YACxC,0BAAoB,GAAkB,EAAE,CAAC;YAEzC,cAAQ,GAAG,KAAK,CAAC;YAIjB,kBAAY,GAAG,CAAC,CAAC;YACjB,oBAAc,GAAG,CAAC,CAAC;YACnB,6BAAuB,GAAG,CAAC,CAAC;YAE5B,4BAAsB,GAAiB,IAAI,CAAC;YAC5C,kCAA4B,GAAiB,IAAI,CAAC;YAIxD,IAAI,CAACC,iBAA6B,EAAE,EAAE;gBACpC,MAAM,IAAI,KAAK,CAAC,wCAAwC,CAAC,CAAC;aAC3D;YACD,KAAI,CAAC,WAAW,GAAG,EAAE,CAAC;YACtB,KAAI,CAAC,aAAa,GAAG,EAAE,CAAC;YACxB,KAAI,CAAC,MAAM,GAAG,MAAM,CAAC;YACrB,KAAI,CAAC,KAAK,GAAG,MAAM,CAAC,KAAK,CAAC;YAC1B,KAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC;YAClC,KAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC;YAC/B,KAAI,C
AAC,gBAAgB,GAAG,gBAAgB,CAAC;YAEzC,KAAI,CAAC,aAAa,GAAG,IAAI,aAAa,CAAC,KAAI,CAAC,MAAM,CAAC,CAAC;YACpD,KAAI,CAAC,cAAc,GAAG,IAAI,cAAc,CAAC,KAAI,CAAC,MAAM,CAAC,CAAC;YACtD,KAAI,CAAC,SAAS,GAAG,IAAIC,cAAW,CAAC,KAAI,EAAEC,SAAM,EAAE,CAAC,CAAC;YACjD,IAAI,KAAI,CAAC,gBAAgB,EAAE;gBACzB,KAAI,CAAC,QAAQ,GAAG,KAAI,CAAC,MAAM,CAAC,cAAc,CAAC;oBACzC,IAAI,EAAE,WAAW;oBACjB,KAAK,EAAE,CAAC;iBACT,CAAC,CAAC;aACJ;;;YAID,IAAIpI,MAAG,EAAE,CAAC,OAAO,CAAC,yBAAyB,CAAC,EAAE;gBAC5C,KAAI,CAAC,WAAW,GAAG,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;gBACpD,KAAI,CAAC,WAAW,CAAC,KAAK,GAAG,CAAC,CAAC;gBAC3B,KAAI,CAAC,WAAW,CAAC,MAAM,GAAG,CAAC,CAAC;gBAE5B,KAAI,CAAC,YAAY,GAAG,KAAI,CAAC,WAAW,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC;gBAC1D,KAAI,CAAC,YAAY,CAAC,SAAS,CAAC;oBAC1B,MAAM,QAAA;oBACN,MAAM,EAAE,YAAY;iBACrB,CAAC,CAAC;gBAEH,QAAQ,CAAC,IAAI,CAAC,WAAW,CAAC,KAAI,CAAC,WAAW,CAAC,CAAC;aAC7C;;SACF;QA/DO,kCAAU,GAAV;YACN,OAAO,aAAa,CAAC,UAAU,EAAE,CAAC;SACnC;QA+DD,sCAAc,GAAd;YACE,OAAO,EAAE,CAAC;SACX;QAED,6CAAqB,GAArB;YACE,OAAO,cAAc,CAAC,OAAO,GAAG,cAAc,CAAC,QAAQ;gBACnD,cAAc,CAAC,QAAQ,CAAC;SAC7B;QAED,0CAAkB,GAAlB;YAAA,iBAmBC;YAlBC,IAAI,CAAC,mBAAmB,CAAC,OAAO,CAAC,UAAA,CAAC;gBAChC,KAAI,CAAC,kBAAkB,CAAC,CAAC,CAAC,CAAC;gBAC3B,KAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;aAC1B,CAAC,CAAC;YACH,IAAI,CAAC,oBAAoB,CAAC,OAAO,CAC7B,UAAA,CAAC,IAAI,OAAA,KAAI,CAAC,aAAa,CAAC,aAAa,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,KAAK,CAAC,GAAA,CAAC,CAAC;YAC1E,IAAI,CAAC,oBAAoB,CAAC,OAAO,CAC7B,UAAA,CAAC,IAAI,OAAA,KAAI,CAAC,aAAa,CAAC,mBAAmB,CACvC,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,KAAK,CAAC,GAAA,CAAC,CAAC;YACxC,IAAI,CAAC,oBAAoB,CAAC,OAAO,CAC7B,UAAA,CAAC,IAAI,OAAA,KAAI,CAAC,cAAc,CAAC,cAAc,CACnC,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC,KAAK,CAAC,GAAA,CAAC,CAAC;YAE1D,IAAI,CAAC,mBAAmB,GAAG,EAAE,CAAC;YAC9B,IAAI,CAAC,oBAAoB,GAAG,EAAE,CAAC;YAC/B,IAAI,CAAC,oBAAoB,GAAG,EAAE,CAAC;YAE/B,IAAI,CAAC,oBAAoB,GAAG,EAAE,CAAC;SAChC;;;;;;;;QASD,mCAAW,GAAX,UAAY,MAAc,EAAE,KAAa;YAAb,sBAAA,EAAA,aAAa;YACvC,IAAI,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE;gBAC9B,IAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;gBACxC,IAAI,CAAC,QAAQ,EAAE,CAAC;gBAChB,IAAI,CAAC,KAAK,IAAI,IAAI,CAAC,QAAQ,GAAG,CAAC,EAAE;oBAC/B,OAAO,KAAK,CAAC;iBACd;gBAED,IAAI,IAAI,CAAC,oBAAoB,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE;oBACzC,IAAI,CAAC,mBAAmB,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;oBACtC,OAAO,KAAK,CAAC;iBACd;qBAAM;oBACL,IAAI,CAAC,kBAAkB,CAAC,MAAM,CAAC,CAAC;iBACjC;gBAEM,IAAA,kEAAkB,CAA+B;gBACxD,IAAI,kBAAkB,IAAI,IAAI,EAAE;oBAC9B,IAAI,CAAC,WAAW,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;oBACvD,IAAI,CAAC,WAAW,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;iBACxD;gBAED,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;aAC/B;YACD,OAAO,IAAI,CAAC;SACb;QAED,8BAAM,GAAN;YACE,OAAO;gBACL,aAAa,EAAE,IAAI,CAAC,aAAa,CAAC,YAAY;gBAC9C,sBAAsB,EAAE,IAAI,CAAC,aAAa,CAAC,iBAAiB;gBAC5D,UAAU,EAAE,KAAK;aACE,CAAC;SACvB;QAED,wCAAgB,GAAhB;YACE,OAAO,IAAI,CAAC,aAAa,CAAC;SAC3B;QAED,yCAAiB,GAAjB;YACE,OAAO,IAAI,CAAC,cAAc,CAAC;SAC5B;QAED,qCAAa,GAAb,UACI,QAAgB,EAChB,KAAyD;YAAzD,sBAAA,EAAA,QAA6B,IAAI,CAAC,qBAAqB,EAAE;YAC3D,OAAO,IAAI,CAAC,aAAa,CAAC,aAAa,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;SAC1D;QAED,0CAAkB,GAAlB,UAAmB,MAAc;YAC/B,IAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YACxC,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,CAAC,UAAU,CAAC,MAAM,IAAI,IAAI,EAAE;gBAClD,IAAI,CAAC,aAAa,CAAC,aAAa,CAC5B,IAAI,CAAC,UAAU,CAAC,MAAM,EAAE,IAAI,CAAC,UAAU,CAAC,QAAQ,EAChD,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;gBAC3B,IAAI,CAAC,UAAU,CAAC,MAAM,GAAG,IAAI,CAAC;aAC/B;SACF;;QAGD,gCAAQ,GAAR,UAAS,MAAc;YACrB,IAAI,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAA
M,CAAC,EAAE;gBAC9B,IAAM,UAAU,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;gBAC9C,OAAO,UAAU,CAAC,QAAQ,CAAC;aAC5B;YACD,OAAO,CAAC,CAAC;SACV;;QAGD,8BAAM,GAAN,UAAO,MAAc;YACnB,IAAM,UAAU,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAC9C,UAAU,CAAC,QAAQ,EAAE,CAAC;SACvB;;QAGD,8BAAM,GAAN,UAAO,MAAc;YACnB,IAAI,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE;gBAC9B,IAAM,UAAU,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;gBAC9C,UAAU,CAAC,QAAQ,EAAE,CAAC;aACvB;SACF;QAED,6BAAK,GAAL,UAAM,MAAkC,EAAE,KAAe,EAAE,KAAe;YAExE,IAAI,KAAK,KAAK,WAAW,IAAI,MAAM,IAAI,IAAI,EAAE;gBAC3C,MAAM,IAAI,KAAK,CACX,qCAAqC;oBACrC,oCAAoC,CAAC,CAAC;aAC3C;YAED,IAAM,MAAM,GAAG,EAAC,EAAE,EAAE,IAAI,CAAC,UAAU,EAAE,EAAC,CAAC;YACvC,IAAM,QAAQ,GACVC,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,GAAGoI,kBAA8B,CAAC,KAAK,CAAC,CAAC;YAEtE,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,EAAE;gBACzB,KAAK,OAAA;gBACL,MAAM,QAAA;gBACN,UAAU,EAAE,EAAC,QAAQ,UAAA,EAAE,KAAK,EAAE,IAAI,CAAC,qBAAqB,EAAE,EAAC;gBAC3D,QAAQ,EAAE,CAAC;aACZ,CAAC,CAAC;YACH,OAAO,MAAM,CAAC;SACf;QAED,4BAAI,GAAJ,UACI,MAAc,EAAE,MAAkC,EAAE,KAAe,EACnE,KAAe,EAAE,QAAgB;YACnC,IAAI,KAAK,KAAK,WAAW,EAAE;gBACzB,MAAM,IAAI,KAAK,CACX,qCAAqC;oBACrC,oCAAoC,CAAC,CAAC;aAC3C;YACD,IAAM,QAAQ,GACVpI,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,GAAGoI,kBAA8B,CAAC,KAAK,CAAC,CAAC;YAEtE,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,EAAE;gBACzB,KAAK,OAAA;gBACL,MAAM,QAAA;gBACN,UAAU,EAAE,EAAC,QAAQ,UAAA,EAAE,KAAK,EAAE,IAAI,CAAC,qBAAqB,EAAE,EAAC;gBAC3D,QAAQ,UAAA;aACT,CAAC,CAAC;SACJ;QAED,mCAAW,GAAX;YACE,IAAI,CAAC,sBAAsB,EAAE,CAAC;YAC9B,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,qBAAqB,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC;YACzD,IAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC;YAClC,IAAI,CAAC,uBAAuB,GAAG,CAAC,CAAC;YAEjC,IAAI,CAAC,oBAAoB,GAAG,IAAI,OAAO,EAAU,CAAC;YAElD,IAAI,CAAC,kBAAkB,EAAE,CAAC;SAC3B;QAED,iCAAS,GAAT,UAAU,MAAc;YACtB,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC;YACzB,OAAO,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC;SACrD;QAED,iDAAyB,GAAzB;YACE,IAAI,CAAC,IAAI,CAAC,qBAAqB,EAAE;gBAC/B,IAAI,CAAC,qBAAqB,GAAG,IAAI,CAAC,MAAM,CAAC,oBAAoB,EAAE,CAAC;aACjE;SACF;QAED,8CAAsB,GAAtB;YACE,IAAI,IAAI,CAAC,kBAAkB,EAAE;gBAC3B,IAAI,CAAC,kBAAkB,CAAC,GAAG,EAAE,CAAC;gBAC9B,IAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC;aAChC;SACF;QAED,sCAAc,GAAd;YACE,IAAI,CAAC,IAAI,CAAC,kBAAkB,EAAE;gBAC5B,IAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC,qBAAqB,CAAC,gBAAgB,EAAE,CAAC;aACzE;YACD,OAAO,IAAI,CAAC,kBAAkB,CAAC;SAChC;QAEa,qCAAa,GAAnB,UAAoB,IAAsB;;;;;;4BAEhD,IAAI,IAAI,CAAC,MAAM,IAAI,IAAI,EAAE;;gCAEvB,sBAAO,IAAI,CAAC,MAAM,EAAC;6BACpB;4BACK,OAAO,GAAG,IAAI,CAAC,aAAa,CAC9B,IAAI,CAAC,UAAU,CAAC,QAAQ,EACxB,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,QAAQ,CAAC,CAAC;4BACvD,IAAI,CAAC,yBAAyB,EAAE,CAAC;4BACjC,IAAI,CAAC,sBAAsB,EAAE,CAAC;4BAC9B,IAAI,CAAC,qBAAqB,CAAC,kBAAkB,CACzC,IAAI,CAAC,UAAU,CAAC,MAAM,EAAE,CAAC,EAAE,OAAO,EAAE,CAAC,EAAE,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC;4BACrE,IAAI,CAAC,WAAW,EAAE,CAAC;4BAEnB,qBAAM,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,IAAI,CAAC,EAAA;;4BAAvC,SAAuC,CAAC;4BAClC,MAAM,GAAG,OAAO,CAAC,cAAc,EAAE,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;4BAEjD,OAAO,CAAC,KAAK,EAAE,CAAC;4BAChB,IAAI,OAAO,IAAI,IAAI,EAAE;gCACnB,IAAI,CAAC,aAAa,CAAC,aAAa,CAC5B,OAAO,EAAE,IAAI,CAAC,UAAU,CAAC,QAAQ,EACjC,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,QAAQ,CAAC,CAAC;6BACxD;;;4BAID,IAAIrI,MAAG,EAAE,CAAC,OAAO,CAAC,yBAAyB,CAAC,EAAE;gCAC5CC,OAAI,CAAC,MAAM,CACP,IAAI,CAAC,YAAY,KAAK,SAAS,EAC/B,cAAM,OAAA,wCAAwC,GAAA,CAAC,CAAC;gCACpD,IAAI,CAAC,YAAY,CAAC,iBAAiB,EAAE,CAAC;6BACvC;4BAED,sBAAO,MAAoC,EAAC;;;;SAC7C;QAEO,4CAAoB,GAApB,UAAqB,MAAc,EAAE,IAA6B;YAExE,IAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAExC,IAAI,CAAC,kBAAkB,CAAC,MAAM,CAAC,CAAC;YAEhC,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;YACnB,OAAO,IA
AI,CAAC,MAAM,CAAC;SACpB;;;QAID,gCAAQ,GAAR,UAAS,MAAc;YACrB,IAAM,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YACpC,IAAA,uBAAM,CAAY;YAEzB,IAAI,MAAM,IAAI,IAAI,EAAE;gBAClB,MAAM,IAAI,KAAK,CACX,6DAA6D,CAAC,CAAC;aACpE;YAED,OAAO,MAAM,CAAC;SACf;QAEK,4BAAI,GAAV,UAAW,MAAc;;;;;;4BACvB,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE;gCAC/B,MAAM,IAAI,KAAK,CAAC,YAAU,MAAM,yBAAsB,CAAC,CAAC;6BACzD;4BACK,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;4BAEjC,MAAM,GAAI,IAAI,OAAR,CAAS;4BAEtB,IAAI,MAAM,IAAI,IAAI,EAAE;;;gCAGlB,sBAAO,IAAI,CAAC,oBAAoB,CACrB,MAAM,EAAE,MAAiC,CACtB,EAAC;6BAChC;kCAIG,IAAI,CAAC,KAAK,KAAK,WAAW,CAAA,EAA1B,wBAA0B;4BACjB,qBAAM,OAAO,CAAC,GAAG,CAAC;oCAC3B,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM,CAAC;oCAC9C,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM,CAAC;iCAC/C,CAAC,EAAA;;4BAHI,EAAE,GAAG,SAGT;4BAEI,UAAU,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC;4BACnB,UAAU,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC;4BACzB,IAAI,GAAGC,eAAY,CAAC,sBAAsB,CACtC,UAA0B,EAAE,UAA0B,CAAC,CAAC;;gCAE/C,qBAAM,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,EAAA;;4BAArC,IAAI,GAAG,SAA8B;4BAC3C,IAAI;gCACAoI,uBAAmC,CAAC,IAAmB,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC;;;4BAE3E,IAAI,CAAC,oBAAoB,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;4BACxC,sBAAO,IAAI,EAAC;;;;SACb;QAED,kCAAU,GAAV,UAA2B,CAAa;YACtC,IAAM,IAAI,GAAG,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC;YACrC,IAAI,WAAW,GAAG,IAAkB,CAAC;YACrC,IAAI,CAAC,CAAC,KAAK,KAAK,QAAQ,EAAE;gBACxB,IAAI;;oBAEF,WAAW,GAAI,IAAqB,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAArI,OAAI,CAAC,YAAY,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC;iBACrE;gBAAC,WAAM;oBACN,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAC;iBACrE;aACF;YACD,OAAOU,SAAM,CAAC,CAAC,CAAC,KAAoB,EAAE,CAAC,CAAC,KAAK,EAAE,WAAW,CACvC,CAAC;SACrB;QAEK,4BAAI,GAAV,UAAW,CAAa;;;;;;4BAChB,eAAe,GAAG,IAAI,CAAC,YAAY,CAAC;4BACpC,eAAe,GAAgB,EAAE,CAAC;4BAEpC,aAAa,GAAG,KAAK,CAAC;4BAC1B,IAAI,IAAI,CAAC,kBAAkB,IAAI,IAAI,EAAE;gCACnC,IAAI,CAAC,kBAAkB,GAAG,eAAe,CAAC;gCAC1C,aAAa,GAAG,IAAI,CAAC;6BACtB;iCAAM;gCACL,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;6BACzC;4BACD,IAAI,CAAC,YAAY,GAAG,eAAe,CAAC;4BAEpC,CAAC,EAAE,CAAC;4BAEE,2BAA2B,GAC7BV,OAAI,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,UAAC,CAAmB,IAAK,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,CAAC;iCAChE,MAAM,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,IAAI,IAAI,GAAA,CAAC,CAAC;4BAC1B,yBAAyB,GAC3BA,OAAI,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,UAAC,CAAmB,IAAK,OAAA,CAAC,CAAC,IAAI,GAAA,CAAC,CAAC;iCAC/D,MAAM,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,IAAI,IAAI,GAAA,CAAC,CAAC;4BAEhC,IAAI,CAAC,YAAY,GAAG,eAAe,CAAC;4BAEpC,IAAI,aAAa,EAAE;gCACjB,IAAI,CAAC,kBAAkB,GAAG,IAAI,CAAC;6BAChC;4BACK,GAAG,GAAqB;gCAC5B,YAAY,EAAE,IAAI,CAAC,YAAY;gCAC/B,cAAc,EAAE,IAAI,CAAC,cAAc;gCACnC,QAAQ,EAAE,IAAI;gCACd,MAAM,EAAE,IAAI;6BACb,CAAC;4BAEe,qBAAM,OAAO,CAAC,GAAG,CAAC,2BAA2B,CAAC,EAAA;;4BAAzD,QAAQ,GAAG,SAA8C;4BAC/D,GAAG,CAAC,UAAU,CAAC,GAAGA,OAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC;4BACrC,GAAG,CAAC,qBAAqB,CAAC,GAAG,cACzB,OAAA,QAAQ,CAAC,GAAG,CAAC,UAAC,CAAC,EAAE,CAAC,IAAK,QAAC,EAAC,IAAI,EAAE,yBAAyB,CAAC,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,EAAC,IAAC,CAAC;iCAChE,GAAG,CAAC,UAAA,CAAC,IAAI,OAAG,CAAC,CAAC,IAAI,UAAK,CAAC,CAAC,EAAI,GAAA,CAAC;iCAC9B,IAAI,CAAC,IAAI,CAAC,GAAA,CAAC;4BACpB,IAAI,CAAC,YAAY,GAAG,CAAC,CAAC;4BACtB,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC;4BACxB,sBAAO,GAAG,EAAC;;;;SACZ;QAED,0CAAkB,GAAlB,UAAmB,GAAW,EAAE,WAAqC;YACnE,IAAI,EAAE,GAAG,IAAI,IAAI,CAAC,aAAa,CAAC,EAAE;gBAChC,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,GAAG,WAAW,EAAE,CAAC;aACzC;YACD,OAAO,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC;SAChC;QAED,sCAAc,GAAd,UACI,KAAe,EAAE,KAAe,EAChC,MAA4C;YAC9C,IAAI,MAAM,CAAC;YACX,IAAI,KAAK,KAAK,QAAQ,IAAI,MAAM,IAAI,IAAI,IAAI,MAAM,CAAC,MAAM,GAAG,CAAC;gBACzDA,OAAI,CAAC,QA
AQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,EAAE;gBAC5B,IAAM,aAAa,GACd,MAAyB,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAAA,OAAI,CAAC,YAAY,CAAC,CAAC,CAAC,GAAA,CAAC,CAAC;gBAE9D,MAAM,GAAG,IAAI,CAAC,KAAK,CAAC,aAAa,EAAE,KAAK,EAAE,KAAK,CAAC,CAAC;aAClD;iBAAM;gBACL,MAAM,GAAG,IAAI,CAAC,KAAK,CAAC,MAAoC,EAAE,KAAK,EAAE,KAAK,CAAC,CAAC;aACzE;YACD,OAAO,EAAC,MAAM,QAAA,EAAE,KAAK,OAAA,EAAE,KAAK,OAAA,EAAC,CAAC;SAC/B;QAEO,uCAAe,GAAf,UAAgB,MAAmB;YACzC,IAAI,CAAC,MAAM,EAAE;gBACX,OAAO,IAAI,CAAC;aACb;YAED,IAAM,UAAU,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YAErD,OAAO;gBACL,MAAM,EAAE,CAAC;gBACT,IAAI,EAAE,UAAU,CAAC,UAAU,CAAC,QAAQ;gBACpC,MAAM,EAAE,UAAU,CAAC,UAAU,CAAC,MAAM;aACrC,CAAC;SACH;QAEK,oCAAY,GAAlB,UAAmB,KAAkB;;;oBACnC,IAAI,IAAI,CAAC,gBAAgB,EAAE;wBACzB,sBAAO,IAAI,CAAC,mBAAmB,CAAC,KAAK,CAAC,EAAC;qBACxC;yBAAM;wBACL,sBAAO,CAAC,EAAC;qBACV;;;SACF;QAED,mCAAW,GAAX,UAAY,MAAc;YACxB,IAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAExC,IAAI,IAAI,CAAC,UAAU,CAAC,MAAM,IAAI,IAAI,EAAE;;gBAElC,OAAO;aACR;YAED,IAAI,CAAC,UAAU,CAAC,MAAM,GAAG,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC;YACtE,IAAI,IAAI,CAAC,MAAM,EAAE;gBACf,IAAM,aAAa,GAAG,IAAI,CAAC,aAAa,CAAC,mBAAmB,CACxD,IAAI,CAAC,UAAU,CAAC,QAAQ,EACxB,cAAc,CAAC,SAAS,GAAG,cAAc,CAAC,QAAQ,CAAC,CAAC;gBACxD,IAAM,WAAW,GAAG,aAAa,CAAC,cAAc,EAAE,CAAC;gBACnD,IAAI,IAAI,CAAC,KAAK,KAAK,OAAO,IAAI,IAAI,CAAC,KAAK,KAAK,MAAM,EAAE;oBACnD,IAAI,UAAU,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,MAAoB,CAAC,CAAC;iBAC5D;qBAAM;oBACL,IAAI,YAAY,CAAC,WAAW,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,MAAsB,CAAC,CAAC;iBAChE;gBACD,aAAa,CAAC,KAAK,EAAE,CAAC;gBACtB,IAAI,CAAC,yBAAyB,EAAE,CAAC;gBACjC,IAAI,CAAC,sBAAsB,EAAE,CAAC;gBAC9B,IAAI,CAAC,qBAAqB,CAAC,kBAAkB,CACzC,aAAa,EAAE,CAAC,EAAE,IAAI,CAAC,UAAU,CAAC,MAAM,EAAE,CAAC,EAC3C,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC;gBAE9B,IAAM,WAAW,GAAG;oBAClB,QAAQ,EAAE,IAAI,CAAC,UAAU,CAAC,QAAQ;oBAClC,KAAK,EAAE,cAAc,CAAC,SAAS,GAAG,cAAc,CAAC,QAAQ;oBACzD,MAAM,EAAE,aAAa;iBACtB,CAAC;gBACF,IAAI,CAAC,oBAAoB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;;;;;;;aAO7C;SACF;QAEO,oCAAY,GAAZ,UAAa,gBAAgC;YACnD,IAAI,aAAa,GAAG,CAAC,CAAC;YACtB,IAAI,SAAS,GAAG,CAAC,CAAC;YAClB,IAAM,OAAO,GAAa,EAAE,CAAC;YAC7B,gBAAgB,CAAC,OAAO,CAAC,UAAC,CAAC;gBACzB,IAAI,CAAC,CAAC,IAAI,CAAC,MAAM,KAAK,CAAC,EAAE;oBACvB,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC;iBACd;;gBAED,IAAI,aAAqB,CAAC;gBAC1B,QAAQ,CAAC,CAAC,IAAI,CAAC,MAAM;oBACnB,KAAK,CAAC;wBACJ,aAAa,GAAG,CAAC,CAAC;wBAClB,MAAM;oBACR,KAAK,CAAC;wBACJ,aAAa,GAAG,CAAC,CAAC;wBAClB,MAAM;oBACR,KAAK,CAAC;wBACJ,aAAa,GAAG,EAAE,CAAC;wBACnB,MAAM;oBACR,KAAK,CAAC;wBACJ,aAAa,GAAG,EAAE,CAAC;wBACnB,MAAM;oBACR,KAAK,CAAC;wBACJ,aAAa,GAAG,EAAE,CAAC;wBACnB,MAAM;oBACR,KAAK,CAAC;wBACJ,aAAa,GAAG,EAAE,CAAC;wBACnB,MAAM;oBACR;wBACEA,OAAI,CAAC,MAAM,CAAC,KAAK,EAAE,cAAM,OAAA,iBAAe,CAAC,CAAC,IAAI,CAAC,MAAM,YAAS,GAAA,CAAC,CAAC;iBACnE;gBAED,IAAI,SAAS,KAAK,CAAC,IAAI,SAAS,KAAK,CAAC,EAAE;oBACtC,aAAa,GAAG,EAAE,CAAC;iBACpB;gBACD,aAAa,GAAG,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,aAAa,CAAC,GAAG,aAAa,CAAC;gBACzE,SAAS,GAAG,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC;gBAC1B,OAAO,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;gBAC5B,aAAa,IAAI,CAAC,CAAC,IAAI,CAAC,MAAM,GAAG,CAAC,CAAC;aACpC,CAAC,CAAC;YAEH,IAAM,WAAW,GAAG,IAAI,WAAW,CAAC,aAAa,CAAC,CAAC;YACnD,gBAAgB,CAAC,OAAO,CAAC,UAAC,CAAC,EAAE,CAAC;gBAC5B,IAAM,MAAM,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC;gBAC1B,IAAI,CAAC,CAAC,IAAI,KAAK,OAAO,EAAE;oBACtB,IAAI,UAAU,CAAC,WAAW,EAAE,MAAM,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;iBAChE;qBAAM,IAAI,CAAC,CAAC,IAAI,KAAK,QAAQ,EAAE;oBAC9B,IAAI,WAAW,CAAC,WAAW,EAAE,MAAM,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;iBACjE;qBAAM;oBACL,IAAI,YAAY,CAAC,WA
AW,EAAE,MAAM,EAAE,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;iBAClE;aACF,CAAC,CAAC;YAEH,IAAM,aAAa,GAAG,IAAI,CAAC,aAAa,CACpC,aAAa,EAAE,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,OAAO,CAAC,CAAC;YACrE,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,aAAa,EAAE,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,aAAa,CAAC,CAAC;YAExE,IAAM,WAAW,GAAG;gBAClB,QAAQ,EAAE,aAAa;gBACvB,KAAK,EAAE,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,OAAO;gBACvD,MAAM,EAAE,aAAa;aACtB,CAAC;YACF,IAAI,CAAC,oBAAoB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAE5C,OAAO,EAAC,MAAM,EAAE,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,MAAM,EAAE,aAAa,EAAC,CAAC;SAChE;;QAGO,oCAAY,GAAZ,UAAa,cAAsB;YACzC,IAAM,sBAAsB,GAA8B,EAAE,CAAC;;YAE7D,sBAAsB,CAAC,IAAI,CAAC;gBAC1B,OAAO,EAAE,CAAC;gBACV,UAAU,EAAE,cAAc,CAAC,OAAO;gBAClC,MAAM,EAAE,EAAC,IAAI,EAAE,SAAkB,EAAC;aACnC,CAAC,CAAC;;YAEH,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,cAAc,EAAE,CAAC,EAAE,EAAE;gBACvC,sBAAsB,CAAC,IAAI,CAAC;oBAC1B,OAAO,EAAE,CAAC,GAAG,CAAC;oBACd,UAAU,EAAE,cAAc,CAAC,OAAO;oBAClC,MAAM,EAAE,EAAC,IAAI,EAAE,mBAA4B,EAAC;iBAC7C,CAAC,CAAC;aACJ;YACD,sBAAsB,CAAC,IAAI,CAAC;gBAC1B,OAAO,EAAE,cAAc,GAAG,CAAC;gBAC3B,UAAU,EAAE,cAAc,CAAC,OAAO;gBAClC,MAAM,EAAE,EAAC,IAAI,EAAE,SAAkB,EAAC;aACnC,CAAC,CAAC;YACH,IAAM,eAAe,GACjB,IAAI,CAAC,MAAM,CAAC,qBAAqB,CAAC,EAAC,OAAO,EAAE,sBAAsB,EAAC,CAAC,CAAC;YACzE,IAAM,cAAc,GAChB,IAAI,CAAC,MAAM,CAAC,oBAAoB,CAAC,EAAC,gBAAgB,EAAE,CAAC,eAAe,CAAC,EAAC,CAAC,CAAC;YAC5E,OAAO,EAAC,eAAe,iBAAA,EAAE,cAAc,gBAAA,EAAC,CAAC;SAC1C;QAEO,+CAAuB,GAAvB,UAAwB,cAAsB;YACpD,IAAI,EAAE,cAAc,IAAI,IAAI,CAAC,WAAW,CAAC,EAAE;gBACzC,IAAI,CAAC,WAAW,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,YAAY,CAAC,cAAc,CAAC,CAAC;aACtE;YACD,OAAO,IAAI,CAAC,WAAW,CAAC,cAAc,CAAC,CAAC;SACzC;QAEM,wCAAgB,GAAhB,UACH,OAAqC,EAAE,MAAoB,EAC3D,WAAqB,EAAE,eAAgC,EACvD,MAAmB;YAHhB,iBAuHN;YAnHC,IAAI,CAAC,MAAM,EAAE;gBACX,MAAM,GAAG,IAAI,CAAC,cAAc,CAAC,OAAO,CAAC,WAAW,EAAE,WAAW,CAAC,CAAC;gBAC/D,IAAIA,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE;;;oBAG1C,IAAM,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;oBAClD,OAAO,CAAC,MAAM;wBACVA,OAAI,CAAC,sBAAsB,CAAC,MAAM,CAAC,KAAkB,EAAE,CAAC,CAAC,CAAC;oBAC9D,OAAO,MAAM,CAAC;iBACf;gBACD,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;aACjC;YACD,OAAO,CAAC,QAAQ,GAAG,eAAe,CAAC,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;;;YAIzD,IAAI,gBAAgB,GAChB,CAAC,EAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,GAAG,CAAC,EAAC,CAAC,CAAC;YACrC,IAAM,YAAY,GAAG,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,CAAC;YAC7D,IAAM,YAAY,GAAG,OAAO,CAAC;YAC7B,YAAY,CAAC,GAAG,CAAC,UAAA,CAAC;gBAChB,gBAAgB,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,CAAC,EAAC,CAAC,CAAC;aACtD,CAAC,CAAC;YACH,IAAM,OAAO,GAAGA,OAAI,CAAC,cAAc,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;YAClD,gBAAgB,CAAC,IAAI,CAAC,EAAC,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,OAAO,EAAC,CAAC,CAAC;YAC3D,IAAI,OAAO,CAAC,IAAI,EAAE;gBAChB,IAAM,IAAI,GAAGA,OAAI,CAAC,aAAa,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC;gBACrD,gBAAgB,CAAC,IAAI,CACjB,EAAC,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,CAAC,OAAO,CAAC,MAAM,GAAG,IAAI,GAAG,CAAC,GAAG,IAAI,CAAC,EAAC,CAAC,CAAC;aACrE;YACD,IAAI,eAAe,EAAE;gBACnB,gBAAgB,YAAO,gBAAgB,EAAK,eAAe,CAAC,CAAC;aAC9D;YAED,IAAM,QAAQ,GAAG,IAAI,CAAC,YAAY,CAAC,gBAAgB,CAAC,CAAC;YAErD,IAAM,UAAU,GAAG,MAAM,CAAC,GAAG,CAAC,UAAC,KAAiB,EAAE,CAAS;gBACzD,IAAI,KAAK,CAAC,KAAK,KAAK,WAAW,EAAE;oBAC/B,MAAM,IAAI,KAAK,CACX,+DAA+D;wBAC/D,8DAA8D;wBAC9D,QAAQ,CAAC,CAAC;iBACf;gBACD,KAAI,CAAC,WAAW,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;gBAE/B,OAAO;;;oBAGL,KAAK,EAAE,KAAI,CAAC,SAAS,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,KAAK;oBAC7C,KAAK,EAAE,KAAK,CAAC,KAAK;oBAClB,IAAI,EAAE,OAAO,CAAC,aAAa,CAAC,CAAC,CAAC;iBAC/B,CAAC;aACH,CAAC,CAAC;YACH,IAAM,WAAW,GAAG,UAAU,CAAC,GAAG,CAAC,UAAA,C
AAC,IAAI,OAAA,CAAC,CAAC,KAAK,GAAA,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;YACtE,IAAM,aAAa,GAAG,UAAU,CAAC,GAAG,CAChC,UAAA,CAAC,IAAI,OAAAC,eAAY,CAAC,gBAAgB,CAAC,CAAC,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,GAAA,CAAC,CAAC;YAC/D,IAAM,yBAAyB,GAC3B,UAAU,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAAD,OAAI,CAAC,WAAW,CAAC,CAAC,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,GAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;YAC3E,IAAM,gBAAgB,GAAG,aAAa,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAA,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;YACvE,IAAM,GAAG,GAAGsI,aAA4B,CACpC,OAAO,EAAE,YAAY,EAAE,WAAW,EAAE,gBAAgB,EACpD,yBAAyB,CAAC,CAAC;YAEzB,IAAA,+DACwD,EADvD,oCAAe,EAAE,kCACsC,CAAC;YAE/D,IAAM,QAAQ,GAAG,IAAI,CAAC,kBAAkB,CAAC,GAAG,EAAE;gBAC5C,OAAOC,cAA6B,CAChC,KAAI,CAAC,MAAM,EAAE,OAAO,EAAE,cAAc,EAAE,UAAU,EAAE,MAAM,CAAC,CAAC;aAC/D,CAAC,CAAC;YAEH,IAAM,iBAAiB,GAAG,IAAI,CAAC,YAAY,IAAI,IAAI,CAAC;;YAGpD,IAAM,EAAE,GAAGC,aAA4B,CACnC,IAAI,CAAC,MAAM,EAAE,eAAe,EAAE,MAAM,CAAC,GAAG,CAAC,UAAA,CAAC,IAAI,OAAA,KAAI,CAAC,eAAe,CAAC,CAAC,CAAC,GAAA,CAAC,EACtE,IAAI,CAAC,eAAe,CAAC,MAAM,CAAC,EAAE,QAAQ,CAAC,CAAC;YAE5C,IAAI,CAAC,yBAAyB,EAAE,CAAC;YACjC,IAAM,IAAI,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC;YACnC,IAAI,iBAAiB,EAAE;gBACrB,IAAI,IAAI,CAAC,gBAAgB,EAAE;;oBAExB,IAAY,CAAC,cAAc,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;iBAChD;aACF;YACD,IAAI,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC;YAC3B,IAAI,CAAC,YAAY,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;YACzB,IAAI,CAAC,QAAQ,CACT,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;YACnE,IAAI,iBAAiB,EAAE;gBACrB,IAAI,IAAI,CAAC,gBAAgB,EAAE;;oBAExB,IAAY,CAAC,cAAc,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;iBAChD;aACF;YACD,IAAI,CAAC,uBAAuB,EAAE,CAAC;YAE/B,MAAM,CAAC,OAAO,CAAC,UAAA,KAAK;gBAClB,KAAI,CAAC,oBAAoB,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;aAC7C,CAAC,CAAC;YACH,IAAI,CAAC,oBAAoB,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YAE7C,IAAIzI,MAAG,EAAE,CAAC,GAAG,CAAC,mCAAmC,CACvC,IAAI,IAAI,CAAC,uBAAuB,EAAE;gBAC1C,IAAI,CAAC,WAAW,EAAE,CAAC;aACpB;YAED,IAAI,iBAAiB,EAAE;gBACrB,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC;oBACrB,IAAI,EAAE,OAAO,CAAC,WAAW,CAAC,IAAI;oBAC9B,KAAK,EAAE,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC;iBACxC,CAAC,CAAC;aACJ;YACD,OAAO,MAAM,CAAC;SACf;QAEO,iDAAyB,GAAzB,UAA0B,SAAkB;YAClD,IAAI,SAAS,EAAE;gBACb,IAAI,IAAI,CAAC,4BAA4B,KAAK,IAAI,EAAE;oBAC9C,IAAI,CAAC,4BAA4B;wBAC7B,IAAI,CAAC,4BAA4B,CAAC,IAAI,iBAAiB,CAAC;iBAC7D;gBACD,OAAO,IAAI,CAAC,4BAA4B,CAAC;aAC1C;YAED,IAAI,IAAI,CAAC,sBAAsB,KAAK,IAAI,EAAE;gBACxC,IAAI,CAAC,sBAAsB;oBACvB,IAAI,CAAC,4BAA4B,CAAC,KAAK,iBAAiB,CAAC;aAC9D;YACD,OAAO,IAAI,CAAC,sBAAsB,CAAC;SACpC;QAEO,oDAA4B,GAA5B,UAA6B,SAAkB;YACrD,IAAM,sBAAsB,GAA8B,EAAE,CAAC;;YAE7D,sBAAsB,CAAC,IAAI,CAAC;gBAC1B,OAAO,EAAE,CAAC;gBACV,UAAU,EAAE,cAAc,CAAC,OAAO;gBAClC,MAAM,EAAE,EAAC,IAAI,EAAE,SAAkB,EAAC;aACnC,CAAC,CAAC;;YAEH,IAAI,SAAS,EAAE;gBACb,sBAAsB,CAAC,IAAI,CAAC;oBAC1B,OAAO,EAAE,CAAC;oBACV,UAAU,EAAE,cAAc,CAAC,OAAO;oBAClC,eAAe,EAAE,EAAE;iBACpB,CAAC,CAAC;aACJ;iBAAM;gBACL,sBAAsB,CAAC,IAAI,CACvB,EAAC,OAAO,EAAE,CAAC,EAAE,UAAU,EAAE,cAAc,CAAC,OAAO,EAAE,OAAO,EAAE,EAAE,EAAC,CAAC,CAAC;aACpE;;YAED,sBAAsB,CAAC,IAAI,CACvB,EAAC,OAAO,EAAE,CAAC,EAAE,UAAU,EAAE,cAAc,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE,EAAC,CAAC,CAAC;YAClE,IAAM,wBAAwB,GAC1B,IAAI,CAAC,MAAM,CAAC,qBAAqB,CAAC,EAAC,OAAO,EAAE,sBAAsB,EAAC,CAAC,CAAC;YACzE,IAAM,uBAAuB,GAAG,IAAI,CAAC,MAAM,CAAC,oBAAoB,CAC5D,EAAC,gBAAgB,EAAE,CAAC,wBAAwB,CAAC,EAAC,CAAC,CAAC;YACpD,OAAO;gBACL,eAAe,EAAE,wBAAwB;gBACzC,cAAc,EAAE,uBAAuB;aACxC,CAAC;SACH;QAEO,kDAA0B,GAA1B,UACJ,aAA4B,EAAE,QAAkB;YAClD,IAAM,YAAY,GAAG,eAAe,CAAC,QAAQ;gBACzC,eAAe,CAAC,iBAAiB,GAAG,eAAe,CAAC,eAAe,CAAC;YACxE,IAAM,aAAa,GAAG,YAAgC,CA
AC;YACvD,IAAM,OAAO,GAAG,IAAI,CAAC,cAAc,CAAC,cAAc,CAC9C,QAAQ,CAAC,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,EAAE,aAAa,EAAE,YAAY,CAAC,CAAC;YAC3D,IAAM,gBAAgB,GAAG,OAAO,CAAC,UAAU,EAAE,CAAC;YAE9C,IAAI,CAAC,KAAK,CAAC,0BAA0B,CACjC,EAAC,MAAM,EAAE,aAAa,EAAC,EAAE,EAAC,OAAO,SAAA,EAAC,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YAEpE,IAAM,WAAW,GAAG;gBAClB,KAAK,EAAE,QAAQ,CAAC,CAAC,CAAC;gBAClB,MAAM,EAAE,QAAQ,CAAC,CAAC,CAAC;gBACnB,MAAM,EAAE,aAAa;gBACrB,KAAK,EAAE,YAAY;gBACnB,OAAO,SAAA;aACR,CAAC;YACF,IAAI,CAAC,oBAAoB,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;YAC5C,OAAO,gBAAgB,CAAC;SACzB;QAED,4CAAoB,GAApB,UACI,OAAqC,EAAE,QAAkB,EACzD,eAA+B,EAAE,SAAkB,EACnD,aAA6C;YAHjD,iBA+FC;YA3FC,OAAO,CAAC,QAAQ,GAAG,eAAe,CAAC,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;YAEzD,IAAM,MAAM,GAAG,IAAI,CAAC,cAAc,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;YACtD,IAAIC,OAAI,CAAC,aAAa,CAAC,MAAM,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE;;;gBAG1C,IAAM,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;gBAClD,OAAO,CAAC,MAAM;oBACVA,OAAI,CAAC,sBAAsB,CAAC,MAAM,CAAC,KAAkB,EAAE,CAAC,CAAC,CAAC;gBAC9D,OAAO,MAAM,CAAC;aACf;YACD,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YAChC,IAAM,GAAG,GAAGsI,aAA4B,CAAC,OAAO,EAAE,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC;YAElE,IAAM,MAAM,GAAG,IAAI,CAAC,yBAAyB,CAAC,SAAS,CAAC,CAAC;YAEzD,IAAM,QAAQ,GAAG,IAAI,CAAC,kBAAkB,CAAC,GAAG,EAAE;gBAC5C,OAAOC,cAA6B,CAChC,KAAI,CAAC,MAAM,EAAE,OAAO,EAAE,MAAM,CAAC,cAAc,EAAE,EAAE,EAAE,MAAM,EAAE,IAAI,CAAC,CAAC;aACpE,CAAC,CAAC;YAEH,IAAI,gBAAmD,CAAC;YACxD,IAAI,SAAS,EAAE;gBACb,IAAM,yBAAyB,GAAG;oBAChC,MAAM,EAAE,aAAiC;iBAC1C,CAAC;gBACF,gBAAgB;oBACZ,IAAI,CAAC,MAAM,CAAC,qBAAqB,CAAC,yBAAyB,CAAC,CAAC;aAClE;iBAAM;gBACL,gBAAgB,GAAG,IAAI,CAAC,0BAA0B,CAC9C,aAA8B,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC;aACnD;YAED,IAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC;YAC7C,IAAM,QAAQ,GAAG,IAAI,CAAC,YAAY,CAAC,eAAe,CAAC,CAAC;YACpD,IAAM,SAAS,GAAG,IAAI,CAAC,MAAM,CAAC,eAAe,CAAC;gBAC5C,MAAM,EAAE,MAAM,CAAC,eAAe;gBAC9B,OAAO,EAAE;oBACP;wBACE,OAAO,EAAE,CAAC;wBACV,QAAQ,EAAE;;4BAER,MAAM,EAAG,OAA4B,CAAC,MAAM;yBAC7C;qBACF;oBACD;wBACE,OAAO,EAAE,CAAC;wBACV,QAAQ,EAAE,gBAAgB;qBAC3B;oBACD;wBACE,OAAO,EAAE,CAAC;wBACV,QAAQ,EAAE;;4BAER,MAAM,EAAG,QAA6B,CAAC,MAAM;yBAC9C;qBACF;iBACF;aACF,CAAC,CAAC;YACH,IAAI,CAAC,yBAAyB,EAAE,CAAC;YACjC,IAAM,IAAI,GAAG,IAAI,CAAC,cAAc,EAAE,CAAC;YACnC,IAAM,iBAAiB,GAAG,IAAI,CAAC,YAAY,IAAI,IAAI,CAAC;YACpD,IAAI,iBAAiB,EAAE;gBACrB,IAAI,IAAI,CAAC,gBAAgB,EAAE;;oBAExB,IAAY,CAAC,cAAc,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;iBAChD;aACF;YACD,IAAI,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC;YAC3B,IAAI,CAAC,YAAY,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC;YAChC,IAAI,CAAC,QAAQ,CACT,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC;YACnE,IAAI,iBAAiB,EAAE;gBACrB,IAAI,IAAI,CAAC,gBAAgB,EAAE;;oBAExB,IAAY,CAAC,cAAc,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;iBAChD;aACF;YACD,IAAI,CAAC,oBAAoB,CAAC,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YAC7C,IAAI,CAAC,uBAAuB,EAAE,CAAC;YAC/B,IAAIxI,MAAG,EAAE,CAAC,GAAG,CAAC,mCAAmC,CACvC,IAAI,IAAI,CAAC,uBAAuB,EAAE;gBAC1C,IAAI,CAAC,WAAW,EAAE,CAAC;aACpB;YAED,IAAI,iBAAiB,EAAE;gBACrB,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC;oBACrB,IAAI,EAAE,OAAO,CAAC,WAAW,CAAC,IAAI;oBAC9B,KAAK,EAAE,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC,QAAQ,CAAC;iBACxC,CAAC,CAAC;aACJ;YACD,OAAO,MAAM,CAAC;SACf;QAEK,2CAAmB,GAAzB,UAA0B,QAAqB;;;;;;4BACvC,WAAW,GAAG,IAAI,CAAC,aAAa,CAClC,EAAE,EAAE,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,aAAa,CAAC,CAAC;4BAC1D,GAAG,GAAG,IAAI,CAAC,aAAa,CAC1B,EAAE,EAAE,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,QAAQ,CAAC,CAAC;4BAE3D,IAAI,CAAC,yBAAyB,EAAE,CAAC;4BACjC,IAAI,CAAC,sBAAsB,EAAE,CAAC;4BAC9B,IAAI,CAAC,qBAAqB,CAAC,eAAe,CAAC,QAAQ,EAAE,CAAC,EAAE,CAA
C,EAAE,WAAW,EAAE,CAAC,CAAC,CAAC;4BAC3E,IAAI,CAAC,qBAAqB,CAAC,kBAAkB,CAAC,WAAW,EAAE,CAAC,EAAE,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC;4BAC1E,IAAI,CAAC,WAAW,EAAE,CAAC;4BACnB,qBAAM,GAAG,CAAC,QAAQ,CAAC,UAAU,CAAC,IAAI,CAAC,EAAA;;4BAAnC,SAAmC,CAAC;4BAC9B,QAAQ,GAAG,IAAI,cAAc,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC,CAAC;4BACpD,gBAAgB,GAAG,MAAM,EAAE,QAAQ,CAAC,CAAC,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC;4BAC7D,GAAG,CAAC,KAAK,EAAE,CAAC;4BACZ,IAAI,CAAC,aAAa,CAAC,aAAa,CAC5B,GAAG,EAAE,EAAE,EAAE,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,QAAQ,CAAC,CAAC;4BAChE,IAAI,CAAC,aAAa,CAAC,aAAa,CAC5B,WAAW,EAAE,EAAE,EACf,cAAc,CAAC,QAAQ,GAAG,cAAc,CAAC,aAAa,CAAC,CAAC;;4BAE5D,sBAAO,gBAAgB,GAAG,OAAO,EAAC;;;;SACnC;QAED,0CAAkB,GAAlB,UACI,MAAoB,EACpB,aAA0C;YAF9C,iBAQC;YANG,8BAAA,EAAA,0CAA0C;YAC5C,OAAOA,MAAG,EAAE,CAAC,OAAO,CAAC,oBAAoB,CAAC;gBACtC,MAAM,CAAC,KAAK,CACR,UAAA,KAAK,IACD,OAAA,KAAI,CAAC,SAAS,CAAC,GAAG,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,UAAU,CAAC,MAAM,IAAI,IAAI;oBAC1DC,OAAI,CAAC,aAAa,CAAC,KAAK,CAAC,KAAK,CAAC,GAAG,aAAa,GAAA,CAAC,CAAC;SAC9D;QAED,kCAAU,GAAV;YACE,OAAO,IAAI,CAAC,SAAS,CAAC,UAAU,EAAE,GAAG,IAAI,CAAC,mBAAmB,CAAC,MAAM,CAAC;SACtE;QAED,+BAAO,GAAP;YACE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBACjB,OAAO;aACR;YACD,IAAI,CAAC,aAAa,CAAC,OAAO,EAAE,CAAC;YAC7B,IAAI,CAAC,cAAc,CAAC,OAAO,EAAE,CAAC;YAC9B,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;SACtB;;KAx7BH,CAAmCyI,gBAAa;IAU/B,wBAAU,GAAG,CAAC;;ICvH/B;;;;;;;;;;;;;;;;;;;;;;;ICiBA;IASA,IAAI,iBAAiB,EAAE,EAAE;QACvBC,kBAAe,CAAC,QAAQ,EAAE;;;;;;;wBAGxB3I,MAAG,EAAE,CAAC,GAAG,CAAC,8BAA8B,EAAE,KAAK,CAAC,CAAC;wBAE3C,aAAa,GAA6B;4BAC9C,eAAe,EAAEA,MAAG,EAAE,CAAC,GAAG,CAAC,0BAA0B,CAAC;gCAClD,WAAW;gCACX,kBAAkB;yBACvB,CAAC;wBAEc,qBAAM,SAAS,CAAC,GAAG,CAAC,cAAc,CAAC,aAAa,CAAC,EAAA;;wBAA3D,OAAO,GAAG,SAAiD;wBAC3D,aAAa,GAAG,OAAO,CAAC,MAAM,CAAC;wBAC/B,gBAAgB,GAAwB,EAAE,CAAC;wBAC3C,gBAAgB,GAAG,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAC,iBAAiB,CAAC,CAAC;wBACjE,gBAAgB,CAAC,cAAc,GAAG;4BAChC,gCAAgC,EAC5B,aAAa,CAAC,8BAA8B;4BAChD,kCAAkC,EAC9B,aAAa,CAAC,gCAAgC;yBACnD,CAAC;wBAEF,IAAI,gBAAgB,EAAE;4BACpB,gBAAgB,CAAC,gBAAgB,GAAG,CAAC,iBAA0B,CAAC,CAAC;yBAClE;6BAAM;4BACL,OAAO,CAAC,IAAI,CACR,yDAAyD;gCACzD,iCAAiC;gCACjC,+DAA+D;gCAC/D,+DAA+D;gCAC/D,iEAAiE;gCACjE,yDAAyD,CAAC,CAAC;yBAChE;wBACyB,qBAAM,OAAO,CAAC,aAAa,CAAC,gBAAgB,CAAC,EAAA;;wBAAjE,MAAM,GAAc,SAA6C;wBACvE,sBAAO,IAAI,aAAa,CAAC,MAAM,EAAE,gBAAgB,CAAC,EAAC;;;aACpD,EAAE,CAAC,cAAc,CAAC;;;;;;;;;;;"} \ No newline at end of file