diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index ed440a0c5cc..f9b7f12a289 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -24,3 +24,4 @@
/py @chipkent @jmao-denver @rcaudy
/R @chipkent @alexpeters1208 @rcaudy
*.proto @devinrsmith @nbauernfeind @niloc132 @rcaudy
+*.gwt.xml @niloc132 @rcaudy @nbauernfeind
diff --git a/Base/src/main/resources/io/deephaven/base/Base.gwt.xml b/Base/src/main/resources/io/deephaven/base/Base.gwt.xml
new file mode 100644
index 00000000000..6cf87067eae
--- /dev/null
+++ b/Base/src/main/resources/io/deephaven/base/Base.gwt.xml
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/IO/src/main/resources/io/deephaven/io/IO.gwt.xml b/IO/src/main/resources/io/deephaven/io/IO.gwt.xml
new file mode 100644
index 00000000000..51000b6cea3
--- /dev/null
+++ b/IO/src/main/resources/io/deephaven/io/IO.gwt.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/Util/src/main/java/io/deephaven/util/MultiException.java b/Util/src/main/java/io/deephaven/util/MultiException.java
index f2dcc13a90c..e5ce94740c8 100644
--- a/Util/src/main/java/io/deephaven/util/MultiException.java
+++ b/Util/src/main/java/io/deephaven/util/MultiException.java
@@ -105,7 +105,7 @@ public void printStackTrace(PrintStream s) {
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
- sb.append(super.getMessage()).append(": \n");
+ sb.append(super.getMessage()).append(":\n");
for (int i = 0; i < causes.length; i++) {
sb.append("Cause ").append(i).append(": ");
sb.append(causes[i].toString());
diff --git a/Util/src/main/resources/io/deephaven/Util.gwt.xml b/Util/src/main/resources/io/deephaven/Util.gwt.xml
new file mode 100644
index 00000000000..0a6e96c6463
--- /dev/null
+++ b/Util/src/main/resources/io/deephaven/Util.gwt.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/buildSrc/src/main/groovy/GwtTools.groovy b/buildSrc/src/main/groovy/GwtTools.groovy
index 23b5711d005..148eacfa0d4 100644
--- a/buildSrc/src/main/groovy/GwtTools.groovy
+++ b/buildSrc/src/main/groovy/GwtTools.groovy
@@ -63,6 +63,7 @@ class GwtTools {
generateJsInteropExports = true
// TODO move this down a line when we want to give clients js that is not super strict / rigged to blow
checkAssertions = true
+ setExtraArgs('-includeJsInteropExports', 'io.deephaven.*')
if (gwtDev) {
saveSource = true
extra = extras
diff --git a/engine/chunk/src/main/resources/io/deephaven/chunk/Chunk.gwt.xml b/engine/chunk/src/main/resources/io/deephaven/chunk/Chunk.gwt.xml
new file mode 100644
index 00000000000..1f53956d9ae
--- /dev/null
+++ b/engine/chunk/src/main/resources/io/deephaven/chunk/Chunk.gwt.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/engine/primitive/src/main/resources/io/deephaven/engine/primitive/function/Function.gwt.xml b/engine/primitive/src/main/resources/io/deephaven/engine/primitive/function/Function.gwt.xml
new file mode 100644
index 00000000000..c78151ef021
--- /dev/null
+++ b/engine/primitive/src/main/resources/io/deephaven/engine/primitive/function/Function.gwt.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/extensions/barrage/src/main/resources/io/deephaven/extensions/barrage/Barrage.gwt.xml b/extensions/barrage/src/main/resources/io/deephaven/extensions/barrage/Barrage.gwt.xml
new file mode 100644
index 00000000000..a29af5b6ca8
--- /dev/null
+++ b/extensions/barrage/src/main/resources/io/deephaven/extensions/barrage/Barrage.gwt.xml
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index b4fb36cbd85..e02cdb202c5 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -76,7 +76,8 @@ tdunning = "3.2"
trove = "3.0.3"
undercouch = "2.15.1"
univocity = "2.6.0"
-vertispan-nio = "1.0-alpha-1"
+vertispan-nio = "1.0-alpha-2"
+vertispan-flatbuffers-gwt = "1.12.0-1"
vertispan-ts-defs = "1.0.0-RC4"
# test versions
@@ -287,6 +288,8 @@ univocity-parsers = { module = "com.univocity:univocity-parsers", version.ref =
vertispan-nio-gwt = { module = "com.vertispan.nio:gwt-nio", version.ref = "vertispan-nio" }
+vertispan-flatbuffers-gwt = { module = "com.vertispan.flatbuffers:flatbuffers-gwt", version.ref = "vertispan-flatbuffers-gwt" }
+
vertispan-ts-defs-annotations = { module = "com.vertispan.tsdefs:jsinterop-ts-defs-annotations", version.ref = "vertispan-ts-defs" }
vertispan-ts-defs-doclet = { module = "com.vertispan.tsdefs:jsinterop-ts-defs-doclet", version.ref = "vertispan-ts-defs" }
diff --git a/props/configs/src/main/resources/dh-defaults.prop b/props/configs/src/main/resources/dh-defaults.prop
index 877a9f8d056..d3e0c6d2ede 100644
--- a/props/configs/src/main/resources/dh-defaults.prop
+++ b/props/configs/src/main/resources/dh-defaults.prop
@@ -57,12 +57,15 @@ web.storage.notebook.directory=/notebooks
web.webgl=true
web.webgl.editable=true
+# Default to not flattening web viewports, but allow it as an option
+web.flattenViewports=false
+
# List of configuration properties to provide to unauthenticated clients, so that they can decide how best to prove their
# identity to the server.
authentication.client.configuration.list=AuthHandlers
# List of configuration properties to provide to authenticated clients, so they can interact with the server.
-client.configuration.list=java.version,deephaven.version,barrage.version,groovy.version,python.version,http.session.durationMs,file.separator,web.storage.layout.directory,web.storage.notebook.directory,web.webgl,web.webgl.editable
+client.configuration.list=java.version,deephaven.version,barrage.version,groovy.version,python.version,http.session.durationMs,file.separator,web.storage.layout.directory,web.storage.notebook.directory,web.webgl,web.webgl.editable,web.flattenViewports
# Version list to add to the configuration property list. Each `=`-delimited pair denotes a short name for a versioned
# jar, and a class that is found in that jar. Any such keys will be made available to the client.configuration.list
diff --git a/proto/raw-js-openapi/flight_format/Message.fbs b/proto/raw-js-openapi/flight_format/Message.fbs
deleted file mode 100644
index b93c9e991e9..00000000000
--- a/proto/raw-js-openapi/flight_format/Message.fbs
+++ /dev/null
@@ -1,138 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-include "Schema.fbs";
-
-namespace org.apache.arrow.flatbuf;
-
-/// ----------------------------------------------------------------------
-/// Data structures for describing a table row batch (a collection of
-/// equal-length Arrow arrays)
-
-/// Metadata about a field at some level of a nested type tree (but not
-/// its children).
-///
-/// For example, a List with values `[[1, 2, 3], null, [4], [5, 6], null]`
-/// would have {length: 5, null_count: 2} for its List node, and {length: 6,
-/// null_count: 0} for its Int16 node, as separate FieldNode structs
-struct FieldNode {
- /// The number of value slots in the Arrow array at this level of a nested
- /// tree
- length: long;
-
- /// The number of observed nulls. Fields with null_count == 0 may choose not
- /// to write their physical validity bitmap out as a materialized buffer,
- /// instead setting the length of the bitmap buffer to 0.
- null_count: long;
-}
-
-enum CompressionType:byte {
- // LZ4 frame format, for portability, as provided by lz4frame.h or wrappers
- // thereof. Not to be confused with "raw" (also called "block") format
- // provided by lz4.h
- LZ4_FRAME,
-
- // Zstandard
- ZSTD
-}
-
-/// Provided for forward compatibility in case we need to support different
-/// strategies for compressing the IPC message body (like whole-body
-/// compression rather than buffer-level) in the future
-enum BodyCompressionMethod:byte {
- /// Each constituent buffer is first compressed with the indicated
- /// compressor, and then written with the uncompressed length in the first 8
- /// bytes as a 64-bit little-endian signed integer followed by the compressed
- /// buffer bytes (and then padding as required by the protocol). The
- /// uncompressed length may be set to -1 to indicate that the data that
- /// follows is not compressed, which can be useful for cases where
- /// compression does not yield appreciable savings.
- BUFFER
-}
-
-/// Optional compression for the memory buffers constituting IPC message
-/// bodies. Intended for use with RecordBatch but could be used for other
-/// message types
-table BodyCompression {
- /// Compressor library
- codec: CompressionType = LZ4_FRAME;
-
- /// Indicates the way the record batch body was compressed
- method: BodyCompressionMethod = BUFFER;
-}
-
-/// A data header describing the shared memory layout of a "record" or "row"
-/// batch. Some systems call this a "row batch" internally and others a "record
-/// batch".
-table RecordBatch {
- /// number of records / rows. The arrays in the batch should all have this
- /// length
- length: long;
-
- /// Nodes correspond to the pre-ordered flattened logical schema
- nodes: [FieldNode];
-
- /// Buffers correspond to the pre-ordered flattened buffer tree
- ///
- /// The number of buffers appended to this list depends on the schema. For
- /// example, most primitive arrays will have 2 buffers, 1 for the validity
- /// bitmap and 1 for the values. For struct arrays, there will only be a
- /// single buffer for the validity (nulls) bitmap
- buffers: [Buffer];
-
- /// Optional compression of the message body
- compression: BodyCompression;
-}
-
-/// For sending dictionary encoding information. Any Field can be
-/// dictionary-encoded, but in this case none of its children may be
-/// dictionary-encoded.
-/// There is one vector / column per dictionary, but that vector / column
-/// may be spread across multiple dictionary batches by using the isDelta
-/// flag
-
-table DictionaryBatch {
- id: long;
- data: RecordBatch;
-
- /// If isDelta is true the values in the dictionary are to be appended to a
- /// dictionary with the indicated id. If isDelta is false this dictionary
- /// should replace the existing dictionary.
- isDelta: bool = false;
-}
-
-/// ----------------------------------------------------------------------
-/// The root Message type
-
-/// This union enables us to easily send different message types without
-/// redundant storage, and in the future we can easily add new message types.
-///
-/// Arrow implementations do not need to implement all of the message types,
-/// which may include experimental metadata types. For maximum compatibility,
-/// it is best to send data using RecordBatch
-union MessageHeader {
- Schema, DictionaryBatch, RecordBatch
-}
-
-table Message {
- version: org.apache.arrow.flatbuf.MetadataVersion;
- header: MessageHeader;
- bodyLength: long;
- custom_metadata: [ KeyValue ];
-}
-
-root_type Message;
diff --git a/proto/raw-js-openapi/flight_format/README.md b/proto/raw-js-openapi/flight_format/README.md
deleted file mode 100644
index 52d45ad2b7e..00000000000
--- a/proto/raw-js-openapi/flight_format/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Currently we are generating flatbuffer files manually. See [deephaven-core/#1052](https://github.com/deephaven/deephaven-core/issues/1052) to track the work to automate this.
-
-${FLATC} --ts --no-fb-import --no-ts-reexport -o src/arrow/flight/flatbuf/ flight_format/\*.fbs
diff --git a/proto/raw-js-openapi/flight_format/Schema.fbs b/proto/raw-js-openapi/flight_format/Schema.fbs
deleted file mode 100644
index 2d447d30791..00000000000
--- a/proto/raw-js-openapi/flight_format/Schema.fbs
+++ /dev/null
@@ -1,430 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-/// Logical types, vector layouts, and schemas
-
-namespace org.apache.arrow.flatbuf;
-
-enum MetadataVersion:short {
- /// 0.1.0 (October 2016).
- V1,
-
- /// 0.2.0 (February 2017). Non-backwards compatible with V1.
- V2,
-
- /// 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2.
- V3,
-
- /// >= 0.8.0 (December 2017). Non-backwards compatible with V3.
- V4,
-
- /// >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4
- /// metadata and IPC messages). Implementations are recommended to provide a
- /// V4 compatibility mode with V5 format changes disabled.
- ///
- /// Incompatible changes between V4 and V5:
- /// - Union buffer layout has changed. In V5, Unions don't have a validity
- /// bitmap buffer.
- V5,
-}
-
-/// Represents Arrow Features that might not have full support
-/// within implementations. This is intended to be used in
-/// two scenarios:
-/// 1. A mechanism for readers of Arrow Streams
-/// and files to understand that the stream or file makes
-/// use of a feature that isn't supported or unknown to
-/// the implementation (and therefore can meet the Arrow
-/// forward compatibility guarantees).
-/// 2. A means of negotiating between a client and server
-/// what features a stream is allowed to use. The enums
-/// values here are intented to represent higher level
-/// features, additional details maybe negotiated
-/// with key-value pairs specific to the protocol.
-///
-/// Enums added to this list should be assigned power-of-two values
-/// to facilitate exchanging and comparing bitmaps for supported
-/// features.
-enum Feature : long {
- /// Needed to make flatbuffers happy.
- UNUSED = 0,
- /// The stream makes use of multiple full dictionaries with the
- /// same ID and assumes clients implement dictionary replacement
- /// correctly.
- DICTIONARY_REPLACEMENT = 1,
- /// The stream makes use of compressed bodies as described
- /// in Message.fbs.
- COMPRESSED_BODY = 2
-}
-
-/// These are stored in the flatbuffer in the Type union below
-
-table Null {
-}
-
-/// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct
-/// (according to the physical memory layout). We used Struct_ here as
-/// Struct is a reserved word in Flatbuffers
-table Struct_ {
-}
-
-table List {
-}
-
-/// Same as List, but with 64-bit offsets, allowing to represent
-/// extremely large data values.
-table LargeList {
-}
-
-table FixedSizeList {
- /// Number of list items per value
- listSize: int;
-}
-
-/// A Map is a logical nested type that is represented as
-///
-/// List<entries: Struct<key: K, value: V>>
-///
-/// In this layout, the keys and values are each respectively contiguous. We do
-/// not constrain the key and value types, so the application is responsible
-/// for ensuring that the keys are hashable and unique. Whether the keys are sorted
-/// may be set in the metadata for this field.
-///
-/// In a field with Map type, the field has a child Struct field, which then
-/// has two children: key type and the second the value type. The names of the
-/// child fields may be respectively "entries", "key", and "value", but this is
-/// not enforced.
-///
-/// Map
-/// ```text
-///   - child[0] entries: Struct
-///      - child[0] key: K
-///      - child[1] value: V
-/// ```
-/// Neither the "entries" field nor the "key" field may be nullable.
-///
-/// The metadata is structured so that Arrow systems without special handling
-/// for Map can make Map an alias for List. The "layout" attribute for the Map
-/// field must have the same contents as a List.
-table Map {
- /// Set to true if the keys within each value are sorted
- keysSorted: bool;
-}
-
-enum UnionMode:short { Sparse, Dense }
-
-/// A union is a complex type with children in Field
-/// By default ids in the type vector refer to the offsets in the children
-/// optionally typeIds provides an indirection between the child offset and the type id
-/// for each child `typeIds[offset]` is the id used in the type vector
-table Union {
- mode: UnionMode;
- typeIds: [ int ]; // optional, describes typeid of each child.
-}
-
-table Int {
- bitWidth: int; // restricted to 8, 16, 32, and 64 in v1
- is_signed: bool;
-}
-
-enum Precision:short {HALF, SINGLE, DOUBLE}
-
-table FloatingPoint {
- precision: Precision;
-}
-
-/// Unicode with UTF-8 encoding
-table Utf8 {
-}
-
-/// Opaque binary data
-table Binary {
-}
-
-/// Same as Utf8, but with 64-bit offsets, allowing to represent
-/// extremely large data values.
-table LargeUtf8 {
-}
-
-/// Same as Binary, but with 64-bit offsets, allowing to represent
-/// extremely large data values.
-table LargeBinary {
-}
-
-table FixedSizeBinary {
- /// Number of bytes per value
- byteWidth: int;
-}
-
-table Bool {
-}
-
-/// Exact decimal value represented as an integer value in two's
-/// complement. Currently only 128-bit (16-byte) and 256-bit (32-byte) integers
-/// are used. The representation uses the endianness indicated
-/// in the Schema.
-table Decimal {
- /// Total number of decimal digits
- precision: int;
-
- /// Number of digits after the decimal point "."
- scale: int;
-
- /// Number of bits per value. The only accepted widths are 128 and 256.
- /// We use bitWidth for consistency with Int::bitWidth.
- bitWidth: int = 128;
-}
-
-enum DateUnit: short {
- DAY,
- MILLISECOND
-}
-
-/// Date is either a 32-bit or 64-bit type representing elapsed time since UNIX
-/// epoch (1970-01-01), stored in either of two units:
-///
-/// * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no
-/// leap seconds), where the values are evenly divisible by 86400000
-/// * Days (32 bits) since the UNIX epoch
-table Date {
- unit: DateUnit = MILLISECOND;
-}
-
-enum TimeUnit: short { SECOND, MILLISECOND, MICROSECOND, NANOSECOND }
-
-/// Time type. The physical storage type depends on the unit
-/// - SECOND and MILLISECOND: 32 bits
-/// - MICROSECOND and NANOSECOND: 64 bits
-table Time {
- unit: TimeUnit = MILLISECOND;
- bitWidth: int = 32;
-}
-
-/// Time elapsed from the Unix epoch, 00:00:00.000 on 1 January 1970, excluding
-/// leap seconds, as a 64-bit integer. Note that UNIX time does not include
-/// leap seconds.
-///
-/// Date & time libraries often have multiple different data types for temporal
-/// data. In order to ease interoperability between different implementations the
-/// Arrow project has some recommendations for encoding these types into a Timestamp
-/// column.
-///
-/// An "instant" represents a single moment in time that has no meaningful time zone
-/// or the time zone is unknown. A column of instants can also contain values from
-/// multiple time zones. To encode an instant set the timezone string to "UTC".
-///
-/// A "zoned date-time" represents a single moment in time that has a meaningful
-/// reference time zone. To encode a zoned date-time as a Timestamp set the timezone
-/// string to the name of the timezone. There is some ambiguity between an instant
-/// and a zoned date-time with the UTC time zone. Both of these are stored the same.
-/// Typically, this distinction does not matter. If it does, then an application should
-/// use custom metadata or an extension type to distinguish between the two cases.
-///
-/// An "offset date-time" represents a single moment in time combined with a meaningful
-/// offset from UTC. To encode an offset date-time as a Timestamp set the timezone string
-/// to the numeric time zone offset string (e.g. "+03:00").
-///
-/// A "local date-time" does not represent a single moment in time. It represents a wall
-/// clock time combined with a date. Because of daylight savings time there may multiple
-/// instants that correspond to a single local date-time in any given time zone. A
-/// local date-time is often stored as a struct or a Date32/Time64 pair. However, it can
-/// also be encoded into a Timestamp column. To do so the value should be the the time
-/// elapsed from the Unix epoch so that a wall clock in UTC would display the desired time.
-/// The timezone string should be set to null or the empty string.
-table Timestamp {
- unit: TimeUnit;
-
- /// The time zone is a string indicating the name of a time zone, one of:
- ///
- /// * As used in the Olson time zone database (the "tz database" or
- /// "tzdata"), such as "America/New_York"
- /// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
- ///
- /// Whether a timezone string is present indicates different semantics about
- /// the data:
- ///
- /// * If the time zone is null or an empty string, the data is a local date-time
- /// and does not represent a single moment in time. Instead it represents a wall clock
- /// time and care should be taken to avoid interpreting it semantically as an instant.
- ///
- /// * If the time zone is set to a valid value, values can be displayed as
- /// "localized" to that time zone, even though the underlying 64-bit
- /// integers are identical to the same data stored in UTC. Converting
- /// between time zones is a metadata-only operation and does not change the
- /// underlying values
- timezone: string;
-}
-
-enum IntervalUnit: short { YEAR_MONTH, DAY_TIME}
-// A "calendar" interval which models types that don't necessarily
-// have a precise duration without the context of a base timestamp (e.g.
-// days can differ in length during day light savings time transitions).
-// YEAR_MONTH - Indicates the number of elapsed whole months, stored as
-// 4-byte integers.
-// DAY_TIME - Indicates the number of elapsed days and milliseconds,
-// stored as 2 contiguous 32-bit integers (8-bytes in total). Support
-// of this IntervalUnit is not required for full arrow compatibility.
-table Interval {
- unit: IntervalUnit;
-}
-
-// An absolute length of time unrelated to any calendar artifacts.
-//
-// For the purposes of Arrow Implementations, adding this value to a Timestamp
-// ("t1") naively (i.e. simply summing the two number) is acceptable even
-// though in some cases the resulting Timestamp (t2) would not account for
-// leap-seconds during the elapsed time between "t1" and "t2". Similarly,
-// representing the difference between two Unix timestamp is acceptable, but
-// would yield a value that is possibly a few seconds off from the true elapsed
-// time.
-//
-// The resolution defaults to millisecond, but can be any of the other
-// supported TimeUnit values as with Timestamp and Time types. This type is
-// always represented as an 8-byte integer.
-table Duration {
- unit: TimeUnit = MILLISECOND;
-}
-
-/// ----------------------------------------------------------------------
-/// Top-level Type value, enabling extensible type-specific metadata. We can
-/// add new logical types to Type without breaking backwards compatibility
-
-union Type {
- Null,
- Int,
- FloatingPoint,
- Binary,
- Utf8,
- Bool,
- Decimal,
- Date,
- Time,
- Timestamp,
- Interval,
- List,
- Struct_,
- Union,
- FixedSizeBinary,
- FixedSizeList,
- Map,
- Duration,
- LargeBinary,
- LargeUtf8,
- LargeList,
-}
-
-/// ----------------------------------------------------------------------
-/// user defined key value pairs to add custom metadata to arrow
-/// key namespacing is the responsibility of the user
-
-table KeyValue {
- key: string;
- value: string;
-}
-
-/// ----------------------------------------------------------------------
-/// Dictionary encoding metadata
-/// Maintained for forwards compatibility, in the future
-/// Dictionaries might be explicit maps between integers and values
-/// allowing for non-contiguous index values
-enum DictionaryKind : short { DenseArray }
-table DictionaryEncoding {
- /// The known dictionary id in the application where this data is used. In
- /// the file or streaming formats, the dictionary ids are found in the
- /// DictionaryBatch messages
- id: long;
-
- /// The dictionary indices are constrained to be non-negative integers. If
- /// this field is null, the indices must be signed int32. To maximize
- /// cross-language compatibility and performance, implementations are
- /// recommended to prefer signed integer types over unsigned integer types
- /// and to avoid uint64 indices unless they are required by an application.
- indexType: Int;
-
- /// By default, dictionaries are not ordered, or the order does not have
- /// semantic meaning. In some statistical, applications, dictionary-encoding
- /// is used to represent ordered categorical data, and we provide a way to
- /// preserve that metadata here
- isOrdered: bool;
-
- dictionaryKind: DictionaryKind;
-}
-
-/// ----------------------------------------------------------------------
-/// A field represents a named column in a record / row batch or child of a
-/// nested type.
-
-table Field {
- /// Name is not required, in i.e. a List
- name: string;
-
- /// Whether or not this field can contain nulls. Should be true in general.
- nullable: bool;
-
- /// This is the type of the decoded value if the field is dictionary encoded.
- type: Type;
-
- /// Present only if the field is dictionary encoded.
- dictionary: DictionaryEncoding;
-
- /// children apply only to nested data types like Struct, List and Union. For
- /// primitive types children will have length 0.
- children: [ Field ];
-
- /// User-defined metadata
- custom_metadata: [ KeyValue ];
-}
-
-/// ----------------------------------------------------------------------
-/// Endianness of the platform producing the data
-
-enum Endianness:short { Little, Big }
-
-/// ----------------------------------------------------------------------
-/// A Buffer represents a single contiguous memory segment
-struct Buffer {
- /// The relative offset into the shared memory page where the bytes for this
- /// buffer starts
- offset: long;
-
- /// The absolute length (in bytes) of the memory buffer. The memory is found
- /// from offset (inclusive) to offset + length (non-inclusive). When building
- /// messages using the encapsulated IPC message, padding bytes may be written
- /// after a buffer, but such padding bytes do not need to be accounted for in
- /// the size here.
- length: long;
-}
-
-/// ----------------------------------------------------------------------
-/// A Schema describes the columns in a row batch
-
-table Schema {
-
- /// endianness of the buffer
- /// it is Little Endian by default
- /// if endianness doesn't match the underlying system then the vectors need to be converted
- endianness: Endianness=Little;
-
- fields: [Field];
- // User-defined metadata
- custom_metadata: [ KeyValue ];
-
- /// Features used in the stream/file.
- features : [ Feature ];
-}
-
-root_type Schema;
diff --git a/proto/raw-js-openapi/package.json b/proto/raw-js-openapi/package.json
index 455c3d67837..fdb83f4ec86 100644
--- a/proto/raw-js-openapi/package.json
+++ b/proto/raw-js-openapi/package.json
@@ -1,9 +1,6 @@
{
"dependencies": {
- "@deephaven/barrage": "0.5.0",
"@improbable-eng/grpc-web": "^0.14.0",
- "apache-arrow": "7.0.0",
- "flatbuffers": "1.12.0",
"google-protobuf": "^3.20.1"
},
"devDependencies": {
diff --git a/proto/raw-js-openapi/src/arrow/flight/flatbuf/Message_generated.ts b/proto/raw-js-openapi/src/arrow/flight/flatbuf/Message_generated.ts
deleted file mode 100644
index 8d86b6aa04f..00000000000
--- a/proto/raw-js-openapi/src/arrow/flight/flatbuf/Message_generated.ts
+++ /dev/null
@@ -1,741 +0,0 @@
-// automatically generated by the FlatBuffers compiler, do not modify
-
-import * as NS17716817176095924048 from "./Schema_generated";
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum CompressionType{
- LZ4_FRAME= 0,
- ZSTD= 1
-};
-}
-
-/**
- * Provided for forward compatibility in case we need to support different
- * strategies for compressing the IPC message body (like whole-body
- * compression rather than buffer-level) in the future
- *
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum BodyCompressionMethod{
- /**
- * Each constituent buffer is first compressed with the indicated
- * compressor, and then written with the uncompressed length in the first 8
- * bytes as a 64-bit little-endian signed integer followed by the compressed
- * buffer bytes (and then padding as required by the protocol). The
- * uncompressed length may be set to -1 to indicate that the data that
- * follows is not compressed, which can be useful for cases where
- * compression does not yield appreciable savings.
- */
- BUFFER= 0
-};
-}
-
-/**
- * ----------------------------------------------------------------------
- * The root Message type
- * This union enables us to easily send different message types without
- * redundant storage, and in the future we can easily add new message types.
- *
- * Arrow implementations do not need to implement all of the message types,
- * which may include experimental metadata types. For maximum compatibility,
- * it is best to send data using RecordBatch
- *
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum MessageHeader{
- NONE= 0,
- Schema= 1,
- DictionaryBatch= 2,
- RecordBatch= 3
-};
-
-export function unionToMessageHeader(
- type: MessageHeader,
- accessor: (obj:NS17716817176095924048.org.apache.arrow.flatbuf.Schema|org.apache.arrow.flatbuf.DictionaryBatch|org.apache.arrow.flatbuf.RecordBatch) => NS17716817176095924048.org.apache.arrow.flatbuf.Schema|org.apache.arrow.flatbuf.DictionaryBatch|org.apache.arrow.flatbuf.RecordBatch|null
-): NS17716817176095924048.org.apache.arrow.flatbuf.Schema|org.apache.arrow.flatbuf.DictionaryBatch|org.apache.arrow.flatbuf.RecordBatch|null {
- switch(org.apache.arrow.flatbuf.MessageHeader[type]) {
- case 'NONE': return null;
- case 'Schema': return accessor(new NS17716817176095924048.org.apache.arrow.flatbuf.Schema())! as NS17716817176095924048.org.apache.arrow.flatbuf.Schema;
- case 'DictionaryBatch': return accessor(new org.apache.arrow.flatbuf.DictionaryBatch())! as org.apache.arrow.flatbuf.DictionaryBatch;
- case 'RecordBatch': return accessor(new org.apache.arrow.flatbuf.RecordBatch())! as org.apache.arrow.flatbuf.RecordBatch;
- default: return null;
- }
-}
-
-export function unionListToMessageHeader(
- type: MessageHeader,
- accessor: (index: number, obj:NS17716817176095924048.org.apache.arrow.flatbuf.Schema|org.apache.arrow.flatbuf.DictionaryBatch|org.apache.arrow.flatbuf.RecordBatch) => NS17716817176095924048.org.apache.arrow.flatbuf.Schema|org.apache.arrow.flatbuf.DictionaryBatch|org.apache.arrow.flatbuf.RecordBatch|null,
- index: number
-): NS17716817176095924048.org.apache.arrow.flatbuf.Schema|org.apache.arrow.flatbuf.DictionaryBatch|org.apache.arrow.flatbuf.RecordBatch|null {
- switch(org.apache.arrow.flatbuf.MessageHeader[type]) {
- case 'NONE': return null;
- case 'Schema': return accessor(index, new NS17716817176095924048.org.apache.arrow.flatbuf.Schema())! as NS17716817176095924048.org.apache.arrow.flatbuf.Schema;
- case 'DictionaryBatch': return accessor(index, new org.apache.arrow.flatbuf.DictionaryBatch())! as org.apache.arrow.flatbuf.DictionaryBatch;
- case 'RecordBatch': return accessor(index, new org.apache.arrow.flatbuf.RecordBatch())! as org.apache.arrow.flatbuf.RecordBatch;
- default: return null;
- }
-}
-}
-
-/**
- * ----------------------------------------------------------------------
- * Data structures for describing a table row batch (a collection of
- * equal-length Arrow arrays)
- * Metadata about a field at some level of a nested type tree (but not
- * its children).
- *
- * For example, a List with values `[[1, 2, 3], null, [4], [5, 6], null]`
- * would have {length: 5, null_count: 2} for its List node, and {length: 6,
- * null_count: 0} for its Int16 node, as separate FieldNode structs
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class FieldNode {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns FieldNode
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):FieldNode {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * The number of value slots in the Arrow array at this level of a nested
- * tree
- *
- * @returns flatbuffers.Long
- */
-length():flatbuffers.Long {
- return this.bb!.readInt64(this.bb_pos);
-};
-
-/**
- * The number of observed nulls. Fields with null_count == 0 may choose not
- * to write their physical validity bitmap out as a materialized buffer,
- * instead setting the length of the bitmap buffer to 0.
- *
- * @returns flatbuffers.Long
- */
-nullCount():flatbuffers.Long {
- return this.bb!.readInt64(this.bb_pos + 8);
-};
-
-/**
- * @returns number
- */
-static sizeOf():number {
- return 16;
-}
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Long length
- * @param flatbuffers.Long null_count
- * @returns flatbuffers.Offset
- */
-static createFieldNode(builder:flatbuffers.Builder, length: flatbuffers.Long, null_count: flatbuffers.Long):flatbuffers.Offset {
- builder.prep(8, 16);
- builder.writeInt64(null_count);
- builder.writeInt64(length);
- return builder.offset();
-};
-
-}
-}
-/**
- * Optional compression for the memory buffers constituting IPC message
- * bodies. Intended for use with RecordBatch but could be used for other
- * message types
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class BodyCompression {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns BodyCompression
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):BodyCompression {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param BodyCompression= obj
- * @returns BodyCompression
- */
-static getRootAsBodyCompression(bb:flatbuffers.ByteBuffer, obj?:BodyCompression):BodyCompression {
- return (obj || new BodyCompression()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param BodyCompression= obj
- * @returns BodyCompression
- */
-static getSizePrefixedRootAsBodyCompression(bb:flatbuffers.ByteBuffer, obj?:BodyCompression):BodyCompression {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new BodyCompression()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * Compressor library
- *
- * @returns org.apache.arrow.flatbuf.CompressionType
- */
-codec():org.apache.arrow.flatbuf.CompressionType {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt8(this.bb_pos + offset)) : org.apache.arrow.flatbuf.CompressionType.LZ4_FRAME;
-};
-
-/**
- * Indicates the way the record batch body was compressed
- *
- * @returns org.apache.arrow.flatbuf.BodyCompressionMethod
- */
-method():org.apache.arrow.flatbuf.BodyCompressionMethod {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? /** */ (this.bb!.readInt8(this.bb_pos + offset)) : org.apache.arrow.flatbuf.BodyCompressionMethod.BUFFER;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startBodyCompression(builder:flatbuffers.Builder) {
- builder.startObject(2);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.CompressionType codec
- */
-static addCodec(builder:flatbuffers.Builder, codec:org.apache.arrow.flatbuf.CompressionType) {
- builder.addFieldInt8(0, codec, org.apache.arrow.flatbuf.CompressionType.LZ4_FRAME);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.BodyCompressionMethod method
- */
-static addMethod(builder:flatbuffers.Builder, method:org.apache.arrow.flatbuf.BodyCompressionMethod) {
- builder.addFieldInt8(1, method, org.apache.arrow.flatbuf.BodyCompressionMethod.BUFFER);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endBodyCompression(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createBodyCompression(builder:flatbuffers.Builder, codec:org.apache.arrow.flatbuf.CompressionType, method:org.apache.arrow.flatbuf.BodyCompressionMethod):flatbuffers.Offset {
- BodyCompression.startBodyCompression(builder);
- BodyCompression.addCodec(builder, codec);
- BodyCompression.addMethod(builder, method);
- return BodyCompression.endBodyCompression(builder);
-}
-}
-}
-/**
- * A data header describing the shared memory layout of a "record" or "row"
- * batch. Some systems call this a "row batch" internally and others a "record
- * batch".
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class RecordBatch {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns RecordBatch
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):RecordBatch {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param RecordBatch= obj
- * @returns RecordBatch
- */
-static getRootAsRecordBatch(bb:flatbuffers.ByteBuffer, obj?:RecordBatch):RecordBatch {
- return (obj || new RecordBatch()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param RecordBatch= obj
- * @returns RecordBatch
- */
-static getSizePrefixedRootAsRecordBatch(bb:flatbuffers.ByteBuffer, obj?:RecordBatch):RecordBatch {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new RecordBatch()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * number of records / rows. The arrays in the batch should all have this
- * length
- *
- * @returns flatbuffers.Long
- */
-length():flatbuffers.Long {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
-};
-
-/**
- * Nodes correspond to the pre-ordered flattened logical schema
- *
- * @param number index
- * @param org.apache.arrow.flatbuf.FieldNode= obj
- * @returns org.apache.arrow.flatbuf.FieldNode
- */
-nodes(index: number, obj?:org.apache.arrow.flatbuf.FieldNode):org.apache.arrow.flatbuf.FieldNode|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? (obj || new org.apache.arrow.flatbuf.FieldNode()).__init(this.bb!.__vector(this.bb_pos + offset) + index * 16, this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-nodesLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * Buffers correspond to the pre-ordered flattened buffer tree
- *
- * The number of buffers appended to this list depends on the schema. For
- * example, most primitive arrays will have 2 buffers, 1 for the validity
- * bitmap and 1 for the values. For struct arrays, there will only be a
- * single buffer for the validity (nulls) bitmap
- *
- * @param number index
- * @param org.apache.arrow.flatbuf.Buffer= obj
- * @returns org.apache.arrow.flatbuf.Buffer
- */
-buffers(index: number, obj?:NS17716817176095924048.org.apache.arrow.flatbuf.Buffer):NS17716817176095924048.org.apache.arrow.flatbuf.Buffer|null {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? (obj || new NS17716817176095924048.org.apache.arrow.flatbuf.Buffer()).__init(this.bb!.__vector(this.bb_pos + offset) + index * 16, this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-buffersLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * Optional compression of the message body
- *
- * @param org.apache.arrow.flatbuf.BodyCompression= obj
- * @returns org.apache.arrow.flatbuf.BodyCompression|null
- */
-compression(obj?:org.apache.arrow.flatbuf.BodyCompression):org.apache.arrow.flatbuf.BodyCompression|null {
- var offset = this.bb!.__offset(this.bb_pos, 10);
- return offset ? (obj || new org.apache.arrow.flatbuf.BodyCompression()).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startRecordBatch(builder:flatbuffers.Builder) {
- builder.startObject(4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Long length
- */
-static addLength(builder:flatbuffers.Builder, length:flatbuffers.Long) {
- builder.addFieldInt64(0, length, builder.createLong(0, 0));
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset nodesOffset
- */
-static addNodes(builder:flatbuffers.Builder, nodesOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, nodesOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startNodesVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(16, numElems, 8);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset buffersOffset
- */
-static addBuffers(builder:flatbuffers.Builder, buffersOffset:flatbuffers.Offset) {
- builder.addFieldOffset(2, buffersOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startBuffersVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(16, numElems, 8);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset compressionOffset
- */
-static addCompression(builder:flatbuffers.Builder, compressionOffset:flatbuffers.Offset) {
- builder.addFieldOffset(3, compressionOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endRecordBatch(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-}
-}
-/**
- * For sending dictionary encoding information. Any Field can be
- * dictionary-encoded, but in this case none of its children may be
- * dictionary-encoded.
- * There is one vector / column per dictionary, but that vector / column
- * may be spread across multiple dictionary batches by using the isDelta
- * flag
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class DictionaryBatch {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns DictionaryBatch
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):DictionaryBatch {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param DictionaryBatch= obj
- * @returns DictionaryBatch
- */
-static getRootAsDictionaryBatch(bb:flatbuffers.ByteBuffer, obj?:DictionaryBatch):DictionaryBatch {
- return (obj || new DictionaryBatch()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param DictionaryBatch= obj
- * @returns DictionaryBatch
- */
-static getSizePrefixedRootAsDictionaryBatch(bb:flatbuffers.ByteBuffer, obj?:DictionaryBatch):DictionaryBatch {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new DictionaryBatch()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns flatbuffers.Long
- */
-id():flatbuffers.Long {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
-};
-
-/**
- * @param org.apache.arrow.flatbuf.RecordBatch= obj
- * @returns org.apache.arrow.flatbuf.RecordBatch|null
- */
-data(obj?:org.apache.arrow.flatbuf.RecordBatch):org.apache.arrow.flatbuf.RecordBatch|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? (obj || new org.apache.arrow.flatbuf.RecordBatch()).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
-};
-
-/**
- * If isDelta is true the values in the dictionary are to be appended to a
- * dictionary with the indicated id. If isDelta is false this dictionary
- * should replace the existing dictionary.
- *
- * @returns boolean
- */
-isDelta():boolean {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? !!this.bb!.readInt8(this.bb_pos + offset) : false;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startDictionaryBatch(builder:flatbuffers.Builder) {
- builder.startObject(3);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Long id
- */
-static addId(builder:flatbuffers.Builder, id:flatbuffers.Long) {
- builder.addFieldInt64(0, id, builder.createLong(0, 0));
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset dataOffset
- */
-static addData(builder:flatbuffers.Builder, dataOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, dataOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param boolean isDelta
- */
-static addIsDelta(builder:flatbuffers.Builder, isDelta:boolean) {
- builder.addFieldInt8(2, +isDelta, +false);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endDictionaryBatch(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Message {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Message
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Message {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Message= obj
- * @returns Message
- */
-static getRootAsMessage(bb:flatbuffers.ByteBuffer, obj?:Message):Message {
- return (obj || new Message()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Message= obj
- * @returns Message
- */
-static getSizePrefixedRootAsMessage(bb:flatbuffers.ByteBuffer, obj?:Message):Message {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Message()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.MetadataVersion
- */
-version():NS17716817176095924048.org.apache.arrow.flatbuf.MetadataVersion {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : NS17716817176095924048.org.apache.arrow.flatbuf.MetadataVersion.V1;
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.MessageHeader
- */
-headerType():org.apache.arrow.flatbuf.MessageHeader {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? /** */ (this.bb!.readUint8(this.bb_pos + offset)) : org.apache.arrow.flatbuf.MessageHeader.NONE;
-};
-
-/**
- * @param flatbuffers.Table obj
- * @returns ?flatbuffers.Table
- */
-header<T extends flatbuffers.Table>(obj:T):T|null {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? this.bb!.__union(obj, this.bb_pos + offset) : null;
-};
-
-/**
- * @returns flatbuffers.Long
- */
-bodyLength():flatbuffers.Long {
- var offset = this.bb!.__offset(this.bb_pos, 10);
- return offset ? this.bb!.readInt64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
-};
-
-/**
- * @param number index
- * @param org.apache.arrow.flatbuf.KeyValue= obj
- * @returns org.apache.arrow.flatbuf.KeyValue
- */
-customMetadata(index: number, obj?:NS17716817176095924048.org.apache.arrow.flatbuf.KeyValue):NS17716817176095924048.org.apache.arrow.flatbuf.KeyValue|null {
- var offset = this.bb!.__offset(this.bb_pos, 12);
- return offset ? (obj || new NS17716817176095924048.org.apache.arrow.flatbuf.KeyValue()).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-customMetadataLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 12);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startMessage(builder:flatbuffers.Builder) {
- builder.startObject(5);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.MetadataVersion version
- */
-static addVersion(builder:flatbuffers.Builder, version:NS17716817176095924048.org.apache.arrow.flatbuf.MetadataVersion) {
- builder.addFieldInt16(0, version, NS17716817176095924048.org.apache.arrow.flatbuf.MetadataVersion.V1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.MessageHeader headerType
- */
-static addHeaderType(builder:flatbuffers.Builder, headerType:org.apache.arrow.flatbuf.MessageHeader) {
- builder.addFieldInt8(1, headerType, org.apache.arrow.flatbuf.MessageHeader.NONE);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset headerOffset
- */
-static addHeader(builder:flatbuffers.Builder, headerOffset:flatbuffers.Offset) {
- builder.addFieldOffset(2, headerOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Long bodyLength
- */
-static addBodyLength(builder:flatbuffers.Builder, bodyLength:flatbuffers.Long) {
- builder.addFieldInt64(3, bodyLength, builder.createLong(0, 0));
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset customMetadataOffset
- */
-static addCustomMetadata(builder:flatbuffers.Builder, customMetadataOffset:flatbuffers.Offset) {
- builder.addFieldOffset(4, customMetadataOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array.<flatbuffers.Offset> data
- * @returns flatbuffers.Offset
- */
-static createCustomMetadataVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
- builder.startVector(4, data.length, 4);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addOffset(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startCustomMetadataVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(4, numElems, 4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endMessage(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset offset
- */
-static finishMessageBuffer(builder:flatbuffers.Builder, offset:flatbuffers.Offset) {
- builder.finish(offset);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset offset
- */
-static finishSizePrefixedMessageBuffer(builder:flatbuffers.Builder, offset:flatbuffers.Offset) {
- builder.finish(offset, undefined, true);
-};
-
-static createMessage(builder:flatbuffers.Builder, version:NS17716817176095924048.org.apache.arrow.flatbuf.MetadataVersion, headerType:org.apache.arrow.flatbuf.MessageHeader, headerOffset:flatbuffers.Offset, bodyLength:flatbuffers.Long, customMetadataOffset:flatbuffers.Offset):flatbuffers.Offset {
- Message.startMessage(builder);
- Message.addVersion(builder, version);
- Message.addHeaderType(builder, headerType);
- Message.addHeader(builder, headerOffset);
- Message.addBodyLength(builder, bodyLength);
- Message.addCustomMetadata(builder, customMetadataOffset);
- return Message.endMessage(builder);
-}
-}
-}
diff --git a/proto/raw-js-openapi/src/arrow/flight/flatbuf/Schema_generated.ts b/proto/raw-js-openapi/src/arrow/flight/flatbuf/Schema_generated.ts
deleted file mode 100644
index 1c485ed3b78..00000000000
--- a/proto/raw-js-openapi/src/arrow/flight/flatbuf/Schema_generated.ts
+++ /dev/null
@@ -1,2807 +0,0 @@
-// automatically generated by the FlatBuffers compiler, do not modify
-
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum MetadataVersion{
- /**
- * 0.1.0 (October 2016).
- */
- V1= 0,
-
- /**
- * 0.2.0 (February 2017). Non-backwards compatible with V1.
- */
- V2= 1,
-
- /**
- * 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2.
- */
- V3= 2,
-
- /**
- * >= 0.8.0 (December 2017). Non-backwards compatible with V3.
- */
- V4= 3,
-
- /**
- * >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4
- * metadata and IPC messages). Implementations are recommended to provide a
- * V4 compatibility mode with V5 format changes disabled.
- *
- * Incompatible changes between V4 and V5:
- * - Union buffer layout has changed. In V5, Unions don't have a validity
- * bitmap buffer.
- */
- V5= 4
-};
-}
-
-/**
- * Represents Arrow Features that might not have full support
- * within implementations. This is intended to be used in
- * two scenarios:
- * 1. A mechanism for readers of Arrow Streams
- * and files to understand that the stream or file makes
- * use of a feature that isn't supported or unknown to
- * the implementation (and therefore can meet the Arrow
- * forward compatibility guarantees).
- * 2. A means of negotiating between a client and server
- * what features a stream is allowed to use. The enums
- * values here are intented to represent higher level
- * features, additional details maybe negotiated
- * with key-value pairs specific to the protocol.
- *
- * Enums added to this list should be assigned power-of-two values
- * to facilitate exchanging and comparing bitmaps for supported
- * features.
- *
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum Feature{
- /**
- * Needed to make flatbuffers happy.
- */
- UNUSED= 0,
-
- /**
- * The stream makes use of multiple full dictionaries with the
- * same ID and assumes clients implement dictionary replacement
- * correctly.
- */
- DICTIONARY_REPLACEMENT= 1,
-
- /**
- * The stream makes use of compressed bodies as described
- * in Message.fbs.
- */
- COMPRESSED_BODY= 2
-};
-}
-
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum UnionMode{
- Sparse= 0,
- Dense= 1
-};
-}
-
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum Precision{
- HALF= 0,
- SINGLE= 1,
- DOUBLE= 2
-};
-}
-
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum DateUnit{
- DAY= 0,
- MILLISECOND= 1
-};
-}
-
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum TimeUnit{
- SECOND= 0,
- MILLISECOND= 1,
- MICROSECOND= 2,
- NANOSECOND= 3
-};
-}
-
-/**
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum IntervalUnit{
- YEAR_MONTH= 0,
- DAY_TIME= 1
-};
-}
-
-/**
- * ----------------------------------------------------------------------
- * Top-level Type value, enabling extensible type-specific metadata. We can
- * add new logical types to Type without breaking backwards compatibility
- *
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum Type{
- NONE= 0,
- Null= 1,
- Int= 2,
- FloatingPoint= 3,
- Binary= 4,
- Utf8= 5,
- Bool= 6,
- Decimal= 7,
- Date= 8,
- Time= 9,
- Timestamp= 10,
- Interval= 11,
- List= 12,
- Struct_= 13,
- Union= 14,
- FixedSizeBinary= 15,
- FixedSizeList= 16,
- Map= 17,
- Duration= 18,
- LargeBinary= 19,
- LargeUtf8= 20,
- LargeList= 21
-};
-
-export function unionToType(
- type: Type,
- accessor: (obj:org.apache.arrow.flatbuf.Binary|org.apache.arrow.flatbuf.Bool|org.apache.arrow.flatbuf.Date|org.apache.arrow.flatbuf.Decimal|org.apache.arrow.flatbuf.Duration|org.apache.arrow.flatbuf.FixedSizeBinary|org.apache.arrow.flatbuf.FixedSizeList|org.apache.arrow.flatbuf.FloatingPoint|org.apache.arrow.flatbuf.Int|org.apache.arrow.flatbuf.Interval|org.apache.arrow.flatbuf.LargeBinary|org.apache.arrow.flatbuf.LargeList|org.apache.arrow.flatbuf.LargeUtf8|org.apache.arrow.flatbuf.List|org.apache.arrow.flatbuf.Map|org.apache.arrow.flatbuf.Null|org.apache.arrow.flatbuf.Struct_|org.apache.arrow.flatbuf.Time|org.apache.arrow.flatbuf.Timestamp|org.apache.arrow.flatbuf.Union|org.apache.arrow.flatbuf.Utf8) => org.apache.arrow.flatbuf.Binary|org.apache.arrow.flatbuf.Bool|org.apache.arrow.flatbuf.Date|org.apache.arrow.flatbuf.Decimal|org.apache.arrow.flatbuf.Duration|org.apache.arrow.flatbuf.FixedSizeBinary|org.apache.arrow.flatbuf.FixedSizeList|org.apache.arrow.flatbuf.FloatingPoint|org.apache.arrow.flatbuf.Int|org.apache.arrow.flatbuf.Interval|org.apache.arrow.flatbuf.LargeBinary|org.apache.arrow.flatbuf.LargeList|org.apache.arrow.flatbuf.LargeUtf8|org.apache.arrow.flatbuf.List|org.apache.arrow.flatbuf.Map|org.apache.arrow.flatbuf.Null|org.apache.arrow.flatbuf.Struct_|org.apache.arrow.flatbuf.Time|org.apache.arrow.flatbuf.Timestamp|org.apache.arrow.flatbuf.Union|org.apache.arrow.flatbuf.Utf8|null
-): org.apache.arrow.flatbuf.Binary|org.apache.arrow.flatbuf.Bool|org.apache.arrow.flatbuf.Date|org.apache.arrow.flatbuf.Decimal|org.apache.arrow.flatbuf.Duration|org.apache.arrow.flatbuf.FixedSizeBinary|org.apache.arrow.flatbuf.FixedSizeList|org.apache.arrow.flatbuf.FloatingPoint|org.apache.arrow.flatbuf.Int|org.apache.arrow.flatbuf.Interval|org.apache.arrow.flatbuf.LargeBinary|org.apache.arrow.flatbuf.LargeList|org.apache.arrow.flatbuf.LargeUtf8|org.apache.arrow.flatbuf.List|org.apache.arrow.flatbuf.Map|org.apache.arrow.flatbuf.Null|org.apache.arrow.flatbuf.Struct_|org.apache.arrow.flatbuf.Time|org.apache.arrow.flatbuf.Timestamp|org.apache.arrow.flatbuf.Union|org.apache.arrow.flatbuf.Utf8|null {
- switch(org.apache.arrow.flatbuf.Type[type]) {
- case 'NONE': return null;
- case 'Null': return accessor(new org.apache.arrow.flatbuf.Null())! as org.apache.arrow.flatbuf.Null;
- case 'Int': return accessor(new org.apache.arrow.flatbuf.Int())! as org.apache.arrow.flatbuf.Int;
- case 'FloatingPoint': return accessor(new org.apache.arrow.flatbuf.FloatingPoint())! as org.apache.arrow.flatbuf.FloatingPoint;
- case 'Binary': return accessor(new org.apache.arrow.flatbuf.Binary())! as org.apache.arrow.flatbuf.Binary;
- case 'Utf8': return accessor(new org.apache.arrow.flatbuf.Utf8())! as org.apache.arrow.flatbuf.Utf8;
- case 'Bool': return accessor(new org.apache.arrow.flatbuf.Bool())! as org.apache.arrow.flatbuf.Bool;
- case 'Decimal': return accessor(new org.apache.arrow.flatbuf.Decimal())! as org.apache.arrow.flatbuf.Decimal;
- case 'Date': return accessor(new org.apache.arrow.flatbuf.Date())! as org.apache.arrow.flatbuf.Date;
- case 'Time': return accessor(new org.apache.arrow.flatbuf.Time())! as org.apache.arrow.flatbuf.Time;
- case 'Timestamp': return accessor(new org.apache.arrow.flatbuf.Timestamp())! as org.apache.arrow.flatbuf.Timestamp;
- case 'Interval': return accessor(new org.apache.arrow.flatbuf.Interval())! as org.apache.arrow.flatbuf.Interval;
- case 'List': return accessor(new org.apache.arrow.flatbuf.List())! as org.apache.arrow.flatbuf.List;
- case 'Struct_': return accessor(new org.apache.arrow.flatbuf.Struct_())! as org.apache.arrow.flatbuf.Struct_;
- case 'Union': return accessor(new org.apache.arrow.flatbuf.Union())! as org.apache.arrow.flatbuf.Union;
- case 'FixedSizeBinary': return accessor(new org.apache.arrow.flatbuf.FixedSizeBinary())! as org.apache.arrow.flatbuf.FixedSizeBinary;
- case 'FixedSizeList': return accessor(new org.apache.arrow.flatbuf.FixedSizeList())! as org.apache.arrow.flatbuf.FixedSizeList;
- case 'Map': return accessor(new org.apache.arrow.flatbuf.Map())! as org.apache.arrow.flatbuf.Map;
- case 'Duration': return accessor(new org.apache.arrow.flatbuf.Duration())! as org.apache.arrow.flatbuf.Duration;
- case 'LargeBinary': return accessor(new org.apache.arrow.flatbuf.LargeBinary())! as org.apache.arrow.flatbuf.LargeBinary;
- case 'LargeUtf8': return accessor(new org.apache.arrow.flatbuf.LargeUtf8())! as org.apache.arrow.flatbuf.LargeUtf8;
- case 'LargeList': return accessor(new org.apache.arrow.flatbuf.LargeList())! as org.apache.arrow.flatbuf.LargeList;
- default: return null;
- }
-}
-
-export function unionListToType(
- type: Type,
- accessor: (index: number, obj:org.apache.arrow.flatbuf.Binary|org.apache.arrow.flatbuf.Bool|org.apache.arrow.flatbuf.Date|org.apache.arrow.flatbuf.Decimal|org.apache.arrow.flatbuf.Duration|org.apache.arrow.flatbuf.FixedSizeBinary|org.apache.arrow.flatbuf.FixedSizeList|org.apache.arrow.flatbuf.FloatingPoint|org.apache.arrow.flatbuf.Int|org.apache.arrow.flatbuf.Interval|org.apache.arrow.flatbuf.LargeBinary|org.apache.arrow.flatbuf.LargeList|org.apache.arrow.flatbuf.LargeUtf8|org.apache.arrow.flatbuf.List|org.apache.arrow.flatbuf.Map|org.apache.arrow.flatbuf.Null|org.apache.arrow.flatbuf.Struct_|org.apache.arrow.flatbuf.Time|org.apache.arrow.flatbuf.Timestamp|org.apache.arrow.flatbuf.Union|org.apache.arrow.flatbuf.Utf8) => org.apache.arrow.flatbuf.Binary|org.apache.arrow.flatbuf.Bool|org.apache.arrow.flatbuf.Date|org.apache.arrow.flatbuf.Decimal|org.apache.arrow.flatbuf.Duration|org.apache.arrow.flatbuf.FixedSizeBinary|org.apache.arrow.flatbuf.FixedSizeList|org.apache.arrow.flatbuf.FloatingPoint|org.apache.arrow.flatbuf.Int|org.apache.arrow.flatbuf.Interval|org.apache.arrow.flatbuf.LargeBinary|org.apache.arrow.flatbuf.LargeList|org.apache.arrow.flatbuf.LargeUtf8|org.apache.arrow.flatbuf.List|org.apache.arrow.flatbuf.Map|org.apache.arrow.flatbuf.Null|org.apache.arrow.flatbuf.Struct_|org.apache.arrow.flatbuf.Time|org.apache.arrow.flatbuf.Timestamp|org.apache.arrow.flatbuf.Union|org.apache.arrow.flatbuf.Utf8|null,
- index: number
-): org.apache.arrow.flatbuf.Binary|org.apache.arrow.flatbuf.Bool|org.apache.arrow.flatbuf.Date|org.apache.arrow.flatbuf.Decimal|org.apache.arrow.flatbuf.Duration|org.apache.arrow.flatbuf.FixedSizeBinary|org.apache.arrow.flatbuf.FixedSizeList|org.apache.arrow.flatbuf.FloatingPoint|org.apache.arrow.flatbuf.Int|org.apache.arrow.flatbuf.Interval|org.apache.arrow.flatbuf.LargeBinary|org.apache.arrow.flatbuf.LargeList|org.apache.arrow.flatbuf.LargeUtf8|org.apache.arrow.flatbuf.List|org.apache.arrow.flatbuf.Map|org.apache.arrow.flatbuf.Null|org.apache.arrow.flatbuf.Struct_|org.apache.arrow.flatbuf.Time|org.apache.arrow.flatbuf.Timestamp|org.apache.arrow.flatbuf.Union|org.apache.arrow.flatbuf.Utf8|null {
- switch(org.apache.arrow.flatbuf.Type[type]) {
- case 'NONE': return null;
- case 'Null': return accessor(index, new org.apache.arrow.flatbuf.Null())! as org.apache.arrow.flatbuf.Null;
- case 'Int': return accessor(index, new org.apache.arrow.flatbuf.Int())! as org.apache.arrow.flatbuf.Int;
- case 'FloatingPoint': return accessor(index, new org.apache.arrow.flatbuf.FloatingPoint())! as org.apache.arrow.flatbuf.FloatingPoint;
- case 'Binary': return accessor(index, new org.apache.arrow.flatbuf.Binary())! as org.apache.arrow.flatbuf.Binary;
- case 'Utf8': return accessor(index, new org.apache.arrow.flatbuf.Utf8())! as org.apache.arrow.flatbuf.Utf8;
- case 'Bool': return accessor(index, new org.apache.arrow.flatbuf.Bool())! as org.apache.arrow.flatbuf.Bool;
- case 'Decimal': return accessor(index, new org.apache.arrow.flatbuf.Decimal())! as org.apache.arrow.flatbuf.Decimal;
- case 'Date': return accessor(index, new org.apache.arrow.flatbuf.Date())! as org.apache.arrow.flatbuf.Date;
- case 'Time': return accessor(index, new org.apache.arrow.flatbuf.Time())! as org.apache.arrow.flatbuf.Time;
- case 'Timestamp': return accessor(index, new org.apache.arrow.flatbuf.Timestamp())! as org.apache.arrow.flatbuf.Timestamp;
- case 'Interval': return accessor(index, new org.apache.arrow.flatbuf.Interval())! as org.apache.arrow.flatbuf.Interval;
- case 'List': return accessor(index, new org.apache.arrow.flatbuf.List())! as org.apache.arrow.flatbuf.List;
- case 'Struct_': return accessor(index, new org.apache.arrow.flatbuf.Struct_())! as org.apache.arrow.flatbuf.Struct_;
- case 'Union': return accessor(index, new org.apache.arrow.flatbuf.Union())! as org.apache.arrow.flatbuf.Union;
- case 'FixedSizeBinary': return accessor(index, new org.apache.arrow.flatbuf.FixedSizeBinary())! as org.apache.arrow.flatbuf.FixedSizeBinary;
- case 'FixedSizeList': return accessor(index, new org.apache.arrow.flatbuf.FixedSizeList())! as org.apache.arrow.flatbuf.FixedSizeList;
- case 'Map': return accessor(index, new org.apache.arrow.flatbuf.Map())! as org.apache.arrow.flatbuf.Map;
- case 'Duration': return accessor(index, new org.apache.arrow.flatbuf.Duration())! as org.apache.arrow.flatbuf.Duration;
- case 'LargeBinary': return accessor(index, new org.apache.arrow.flatbuf.LargeBinary())! as org.apache.arrow.flatbuf.LargeBinary;
- case 'LargeUtf8': return accessor(index, new org.apache.arrow.flatbuf.LargeUtf8())! as org.apache.arrow.flatbuf.LargeUtf8;
- case 'LargeList': return accessor(index, new org.apache.arrow.flatbuf.LargeList())! as org.apache.arrow.flatbuf.LargeList;
- default: return null;
- }
-}
-}
-
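The unionToType/unionListToType helpers above are the generated dispatchers for the Type union: given the stored discriminant and an accessor, they instantiate and return the matching concrete table. A minimal usage sketch follows (not part of the original file; it assumes the generated Field table in this same module exposes the usual typeType()/type(obj) union accessors, which are not visible in this hunk):

// Sketch only: resolve a Field's concrete type object and inspect it.
// `field` is assumed to be an org.apache.arrow.flatbuf.Field read from the same buffer.
const typeTag: org.apache.arrow.flatbuf.Type = field.typeType();
const concrete = org.apache.arrow.flatbuf.unionToType(typeTag, obj => field.type(obj));
if (concrete instanceof org.apache.arrow.flatbuf.Int) {
  console.log('Int column:', concrete.bitWidth(), 'bits, signed =', concrete.isSigned());
}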
-/**
- * ----------------------------------------------------------------------
- * Dictionary encoding metadata
- * Maintained for forwards compatibility; in the future,
- * dictionaries might be explicit maps between integers and values,
- * allowing for non-contiguous index values.
- *
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum DictionaryKind{
- DenseArray= 0
-};
-}
-
-/**
- * ----------------------------------------------------------------------
- * Endianness of the platform producing the data
- *
- * @enum {number}
- */
-export namespace org.apache.arrow.flatbuf{
-export enum Endianness{
- Little= 0,
- Big= 1
-};
-}
-
-/**
- * These are stored in the flatbuffer in the Type union below
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Null {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Null
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Null {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Null= obj
- * @returns Null
- */
-static getRootAsNull(bb:flatbuffers.ByteBuffer, obj?:Null):Null {
- return (obj || new Null()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Null= obj
- * @returns Null
- */
-static getSizePrefixedRootAsNull(bb:flatbuffers.ByteBuffer, obj?:Null):Null {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Null()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startNull(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endNull(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createNull(builder:flatbuffers.Builder):flatbuffers.Offset {
- Null.startNull(builder);
- return Null.endNull(builder);
-}
-}
-}
-/**
- * A Struct_ in the flatbuffer metadata is the same as an Arrow Struct
- * (according to the physical memory layout). We used Struct_ here as
- * Struct is a reserved word in Flatbuffers
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Struct_ {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Struct_
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Struct_ {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Struct_= obj
- * @returns Struct_
- */
-static getRootAsStruct_(bb:flatbuffers.ByteBuffer, obj?:Struct_):Struct_ {
- return (obj || new Struct_()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Struct_= obj
- * @returns Struct_
- */
-static getSizePrefixedRootAsStruct_(bb:flatbuffers.ByteBuffer, obj?:Struct_):Struct_ {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Struct_()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startStruct_(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endStruct_(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createStruct_(builder:flatbuffers.Builder):flatbuffers.Offset {
- Struct_.startStruct_(builder);
- return Struct_.endStruct_(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class List {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns List
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):List {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param List= obj
- * @returns List
- */
-static getRootAsList(bb:flatbuffers.ByteBuffer, obj?:List):List {
- return (obj || new List()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param List= obj
- * @returns List
- */
-static getSizePrefixedRootAsList(bb:flatbuffers.ByteBuffer, obj?:List):List {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new List()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startList(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endList(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createList(builder:flatbuffers.Builder):flatbuffers.Offset {
- List.startList(builder);
- return List.endList(builder);
-}
-}
-}
-/**
- * Same as List, but with 64-bit offsets, allowing representation of
- * extremely large data values.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class LargeList {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns LargeList
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):LargeList {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param LargeList= obj
- * @returns LargeList
- */
-static getRootAsLargeList(bb:flatbuffers.ByteBuffer, obj?:LargeList):LargeList {
- return (obj || new LargeList()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param LargeList= obj
- * @returns LargeList
- */
-static getSizePrefixedRootAsLargeList(bb:flatbuffers.ByteBuffer, obj?:LargeList):LargeList {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new LargeList()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startLargeList(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endLargeList(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createLargeList(builder:flatbuffers.Builder):flatbuffers.Offset {
- LargeList.startLargeList(builder);
- return LargeList.endLargeList(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class FixedSizeList {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns FixedSizeList
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):FixedSizeList {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param FixedSizeList= obj
- * @returns FixedSizeList
- */
-static getRootAsFixedSizeList(bb:flatbuffers.ByteBuffer, obj?:FixedSizeList):FixedSizeList {
- return (obj || new FixedSizeList()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param FixedSizeList= obj
- * @returns FixedSizeList
- */
-static getSizePrefixedRootAsFixedSizeList(bb:flatbuffers.ByteBuffer, obj?:FixedSizeList):FixedSizeList {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new FixedSizeList()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * Number of list items per value
- *
- * @returns number
- */
-listSize():number {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 0;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startFixedSizeList(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number listSize
- */
-static addListSize(builder:flatbuffers.Builder, listSize:number) {
- builder.addFieldInt32(0, listSize, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endFixedSizeList(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createFixedSizeList(builder:flatbuffers.Builder, listSize:number):flatbuffers.Offset {
- FixedSizeList.startFixedSizeList(builder);
- FixedSizeList.addListSize(builder, listSize);
- return FixedSizeList.endFixedSizeList(builder);
-}
-}
-}
-/**
- * A Map is a logical nested type that is represented as
- *
- * List<entries: Struct<key: K, value: V>>
- *
- * In this layout, the keys and values are each respectively contiguous. We do
- * not constrain the key and value types, so the application is responsible
- * for ensuring that the keys are hashable and unique. Whether the keys are sorted
- * may be set in the metadata for this field.
- *
- * In a field with Map type, the field has a child Struct field, which then
- * has two children: the first is the key type and the second is the value type. The names of the
- * child fields may be respectively "entries", "key", and "value", but this is
- * not enforced.
- *
- * Map
- * ```text
- * - child[0] entries: Struct
- * - child[0] key: K
- * - child[1] value: V
- * ```
- * Neither the "entries" field nor the "key" field may be nullable.
- *
- * The metadata is structured so that Arrow systems without special handling
- * for Map can make Map an alias for List. The "layout" attribute for the Map
- * field must have the same contents as a List.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Map {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Map
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Map {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Map= obj
- * @returns Map
- */
-static getRootAsMap(bb:flatbuffers.ByteBuffer, obj?:Map):Map {
- return (obj || new Map()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Map= obj
- * @returns Map
- */
-static getSizePrefixedRootAsMap(bb:flatbuffers.ByteBuffer, obj?:Map):Map {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Map()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * Set to true if the keys within each value are sorted
- *
- * @returns boolean
- */
-keysSorted():boolean {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? !!this.bb!.readInt8(this.bb_pos + offset) : false;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startMap(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param boolean keysSorted
- */
-static addKeysSorted(builder:flatbuffers.Builder, keysSorted:boolean) {
- builder.addFieldInt8(0, +keysSorted, +false);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endMap(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createMap(builder:flatbuffers.Builder, keysSorted:boolean):flatbuffers.Offset {
- Map.startMap(builder);
- Map.addKeysSorted(builder, keysSorted);
- return Map.endMap(builder);
-}
-}
-}
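As a quick illustration of the generated builder API for the Map table above (a sketch, not part of the deleted file; the flatbuffers import path is an assumption based on the pre-2.x flatbuffers npm package):

// Sketch: serialize a Map type node whose keys are sorted within each value.
import { flatbuffers } from 'flatbuffers';

const builder = new flatbuffers.Builder(64);
const mapOffset = org.apache.arrow.flatbuf.Map.createMap(builder, /* keysSorted */ true);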
-/**
- * A union is a complex type with children in Field.
- * By default, ids in the type vector refer to the offsets in the children;
- * optionally, typeIds provides an indirection between the child offset and the type id:
- * for each child, `typeIds[offset]` is the id used in the type vector.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Union {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Union
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Union {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Union= obj
- * @returns Union
- */
-static getRootAsUnion(bb:flatbuffers.ByteBuffer, obj?:Union):Union {
- return (obj || new Union()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Union= obj
- * @returns Union
- */
-static getSizePrefixedRootAsUnion(bb:flatbuffers.ByteBuffer, obj?:Union):Union {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Union()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.UnionMode
- */
-mode():org.apache.arrow.flatbuf.UnionMode {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.UnionMode.Sparse;
-};
-
-/**
- * @param number index
- * @returns number
- */
-typeIds(index: number):number|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.readInt32(this.bb!.__vector(this.bb_pos + offset) + index * 4) : 0;
-};
-
-/**
- * @returns number
- */
-typeIdsLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * @returns Int32Array
- */
-typeIdsArray():Int32Array|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? new Int32Array(this.bb!.bytes().buffer, this.bb!.bytes().byteOffset + this.bb!.__vector(this.bb_pos + offset), this.bb!.__vector_len(this.bb_pos + offset)) : null;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startUnion(builder:flatbuffers.Builder) {
- builder.startObject(2);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.UnionMode mode
- */
-static addMode(builder:flatbuffers.Builder, mode:org.apache.arrow.flatbuf.UnionMode) {
- builder.addFieldInt16(0, mode, org.apache.arrow.flatbuf.UnionMode.Sparse);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset typeIdsOffset
- */
-static addTypeIds(builder:flatbuffers.Builder, typeIdsOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, typeIdsOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array. data
- * @returns flatbuffers.Offset
- */
-static createTypeIdsVector(builder:flatbuffers.Builder, data:number[]|Int32Array):flatbuffers.Offset;
-/**
- * @deprecated This Uint8Array overload will be removed in the future.
- */
-static createTypeIdsVector(builder:flatbuffers.Builder, data:number[]|Uint8Array):flatbuffers.Offset;
-static createTypeIdsVector(builder:flatbuffers.Builder, data:number[]|Int32Array|Uint8Array):flatbuffers.Offset {
- builder.startVector(4, data.length, 4);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addInt32(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startTypeIdsVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(4, numElems, 4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endUnion(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createUnion(builder:flatbuffers.Builder, mode:org.apache.arrow.flatbuf.UnionMode, typeIdsOffset:flatbuffers.Offset):flatbuffers.Offset {
- Union.startUnion(builder);
- Union.addMode(builder, mode);
- Union.addTypeIds(builder, typeIdsOffset);
- return Union.endUnion(builder);
-}
-}
-}
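A corresponding sketch for the Union table above (hypothetical usage built only from the static helpers visible in this hunk; it assumes `flatbuffers` is imported as in the earlier sketch): the typeIds vector supplies the optional child-offset-to-type-id indirection described in the comment.

// Sketch: a dense union whose two children are tagged with type ids 5 and 7.
const builder = new flatbuffers.Builder(64);
const typeIds = org.apache.arrow.flatbuf.Union.createTypeIdsVector(builder, [5, 7]);
const unionOffset = org.apache.arrow.flatbuf.Union.createUnion(
    builder, org.apache.arrow.flatbuf.UnionMode.Dense, typeIds);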
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Int {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Int
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Int {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Int= obj
- * @returns Int
- */
-static getRootAsInt(bb:flatbuffers.ByteBuffer, obj?:Int):Int {
- return (obj || new Int()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Int= obj
- * @returns Int
- */
-static getSizePrefixedRootAsInt(bb:flatbuffers.ByteBuffer, obj?:Int):Int {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Int()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns number
- */
-bitWidth():number {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 0;
-};
-
-/**
- * @returns boolean
- */
-isSigned():boolean {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? !!this.bb!.readInt8(this.bb_pos + offset) : false;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startInt(builder:flatbuffers.Builder) {
- builder.startObject(2);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number bitWidth
- */
-static addBitWidth(builder:flatbuffers.Builder, bitWidth:number) {
- builder.addFieldInt32(0, bitWidth, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param boolean isSigned
- */
-static addIsSigned(builder:flatbuffers.Builder, isSigned:boolean) {
- builder.addFieldInt8(1, +isSigned, +false);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endInt(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createInt(builder:flatbuffers.Builder, bitWidth:number, isSigned:boolean):flatbuffers.Offset {
- Int.startInt(builder);
- Int.addBitWidth(builder, bitWidth);
- Int.addIsSigned(builder, isSigned);
- return Int.endInt(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class FloatingPoint {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns FloatingPoint
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):FloatingPoint {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param FloatingPoint= obj
- * @returns FloatingPoint
- */
-static getRootAsFloatingPoint(bb:flatbuffers.ByteBuffer, obj?:FloatingPoint):FloatingPoint {
- return (obj || new FloatingPoint()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param FloatingPoint= obj
- * @returns FloatingPoint
- */
-static getSizePrefixedRootAsFloatingPoint(bb:flatbuffers.ByteBuffer, obj?:FloatingPoint):FloatingPoint {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new FloatingPoint()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.Precision
- */
-precision():org.apache.arrow.flatbuf.Precision {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.Precision.HALF;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startFloatingPoint(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.Precision precision
- */
-static addPrecision(builder:flatbuffers.Builder, precision:org.apache.arrow.flatbuf.Precision) {
- builder.addFieldInt16(0, precision, org.apache.arrow.flatbuf.Precision.HALF);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endFloatingPoint(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createFloatingPoint(builder:flatbuffers.Builder, precision:org.apache.arrow.flatbuf.Precision):flatbuffers.Offset {
- FloatingPoint.startFloatingPoint(builder);
- FloatingPoint.addPrecision(builder, precision);
- return FloatingPoint.endFloatingPoint(builder);
-}
-}
-}
-/**
- * Unicode with UTF-8 encoding
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Utf8 {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Utf8
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Utf8 {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Utf8= obj
- * @returns Utf8
- */
-static getRootAsUtf8(bb:flatbuffers.ByteBuffer, obj?:Utf8):Utf8 {
- return (obj || new Utf8()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Utf8= obj
- * @returns Utf8
- */
-static getSizePrefixedRootAsUtf8(bb:flatbuffers.ByteBuffer, obj?:Utf8):Utf8 {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Utf8()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startUtf8(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endUtf8(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createUtf8(builder:flatbuffers.Builder):flatbuffers.Offset {
- Utf8.startUtf8(builder);
- return Utf8.endUtf8(builder);
-}
-}
-}
-/**
- * Opaque binary data
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Binary {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Binary
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Binary {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Binary= obj
- * @returns Binary
- */
-static getRootAsBinary(bb:flatbuffers.ByteBuffer, obj?:Binary):Binary {
- return (obj || new Binary()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Binary= obj
- * @returns Binary
- */
-static getSizePrefixedRootAsBinary(bb:flatbuffers.ByteBuffer, obj?:Binary):Binary {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Binary()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startBinary(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endBinary(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createBinary(builder:flatbuffers.Builder):flatbuffers.Offset {
- Binary.startBinary(builder);
- return Binary.endBinary(builder);
-}
-}
-}
-/**
- * Same as Utf8, but with 64-bit offsets, allowing representation of
- * extremely large data values.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class LargeUtf8 {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns LargeUtf8
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):LargeUtf8 {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param LargeUtf8= obj
- * @returns LargeUtf8
- */
-static getRootAsLargeUtf8(bb:flatbuffers.ByteBuffer, obj?:LargeUtf8):LargeUtf8 {
- return (obj || new LargeUtf8()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param LargeUtf8= obj
- * @returns LargeUtf8
- */
-static getSizePrefixedRootAsLargeUtf8(bb:flatbuffers.ByteBuffer, obj?:LargeUtf8):LargeUtf8 {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new LargeUtf8()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startLargeUtf8(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endLargeUtf8(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createLargeUtf8(builder:flatbuffers.Builder):flatbuffers.Offset {
- LargeUtf8.startLargeUtf8(builder);
- return LargeUtf8.endLargeUtf8(builder);
-}
-}
-}
-/**
- * Same as Binary, but with 64-bit offsets, allowing representation of
- * extremely large data values.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class LargeBinary {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns LargeBinary
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):LargeBinary {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param LargeBinary= obj
- * @returns LargeBinary
- */
-static getRootAsLargeBinary(bb:flatbuffers.ByteBuffer, obj?:LargeBinary):LargeBinary {
- return (obj || new LargeBinary()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param LargeBinary= obj
- * @returns LargeBinary
- */
-static getSizePrefixedRootAsLargeBinary(bb:flatbuffers.ByteBuffer, obj?:LargeBinary):LargeBinary {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new LargeBinary()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startLargeBinary(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endLargeBinary(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createLargeBinary(builder:flatbuffers.Builder):flatbuffers.Offset {
- LargeBinary.startLargeBinary(builder);
- return LargeBinary.endLargeBinary(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class FixedSizeBinary {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns FixedSizeBinary
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):FixedSizeBinary {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param FixedSizeBinary= obj
- * @returns FixedSizeBinary
- */
-static getRootAsFixedSizeBinary(bb:flatbuffers.ByteBuffer, obj?:FixedSizeBinary):FixedSizeBinary {
- return (obj || new FixedSizeBinary()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param FixedSizeBinary= obj
- * @returns FixedSizeBinary
- */
-static getSizePrefixedRootAsFixedSizeBinary(bb:flatbuffers.ByteBuffer, obj?:FixedSizeBinary):FixedSizeBinary {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new FixedSizeBinary()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * Number of bytes per value
- *
- * @returns number
- */
-byteWidth():number {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 0;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startFixedSizeBinary(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number byteWidth
- */
-static addByteWidth(builder:flatbuffers.Builder, byteWidth:number) {
- builder.addFieldInt32(0, byteWidth, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endFixedSizeBinary(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createFixedSizeBinary(builder:flatbuffers.Builder, byteWidth:number):flatbuffers.Offset {
- FixedSizeBinary.startFixedSizeBinary(builder);
- FixedSizeBinary.addByteWidth(builder, byteWidth);
- return FixedSizeBinary.endFixedSizeBinary(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Bool {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Bool
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Bool {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Bool= obj
- * @returns Bool
- */
-static getRootAsBool(bb:flatbuffers.ByteBuffer, obj?:Bool):Bool {
- return (obj || new Bool()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Bool= obj
- * @returns Bool
- */
-static getSizePrefixedRootAsBool(bb:flatbuffers.ByteBuffer, obj?:Bool):Bool {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Bool()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startBool(builder:flatbuffers.Builder) {
- builder.startObject(0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endBool(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createBool(builder:flatbuffers.Builder):flatbuffers.Offset {
- Bool.startBool(builder);
- return Bool.endBool(builder);
-}
-}
-}
-/**
- * Exact decimal value represented as an integer value in two's
- * complement. Currently only 128-bit (16-byte) and 256-bit (32-byte) integers
- * are used. The representation uses the endianness indicated
- * in the Schema.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Decimal {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Decimal
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Decimal {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Decimal= obj
- * @returns Decimal
- */
-static getRootAsDecimal(bb:flatbuffers.ByteBuffer, obj?:Decimal):Decimal {
- return (obj || new Decimal()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Decimal= obj
- * @returns Decimal
- */
-static getSizePrefixedRootAsDecimal(bb:flatbuffers.ByteBuffer, obj?:Decimal):Decimal {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Decimal()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * Total number of decimal digits
- *
- * @returns number
- */
-precision():number {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 0;
-};
-
-/**
- * Number of digits after the decimal point "."
- *
- * @returns number
- */
-scale():number {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 0;
-};
-
-/**
- * Number of bits per value. The only accepted widths are 128 and 256.
- * We use bitWidth for consistency with Int::bitWidth.
- *
- * @returns number
- */
-bitWidth():number {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 128;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startDecimal(builder:flatbuffers.Builder) {
- builder.startObject(3);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number precision
- */
-static addPrecision(builder:flatbuffers.Builder, precision:number) {
- builder.addFieldInt32(0, precision, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number scale
- */
-static addScale(builder:flatbuffers.Builder, scale:number) {
- builder.addFieldInt32(1, scale, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number bitWidth
- */
-static addBitWidth(builder:flatbuffers.Builder, bitWidth:number) {
- builder.addFieldInt32(2, bitWidth, 128);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endDecimal(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createDecimal(builder:flatbuffers.Builder, precision:number, scale:number, bitWidth:number):flatbuffers.Offset {
- Decimal.startDecimal(builder);
- Decimal.addPrecision(builder, precision);
- Decimal.addScale(builder, scale);
- Decimal.addBitWidth(builder, bitWidth);
- return Decimal.endDecimal(builder);
-}
-}
-}
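For the Decimal table above, the three builder fields map directly onto the createDecimal convenience helper; a sketch, assuming a flatbuffers.Builder named `builder` as in the earlier sketches:

// Sketch: a 128-bit decimal with 38 total digits, 10 of them after the decimal point.
const decimalOffset = org.apache.arrow.flatbuf.Decimal.createDecimal(
    builder, /* precision */ 38, /* scale */ 10, /* bitWidth */ 128);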
-/**
- * Date is either a 32-bit or 64-bit type representing elapsed time since UNIX
- * epoch (1970-01-01), stored in either of two units:
- *
- * * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no
- * leap seconds), where the values are evenly divisible by 86400000
- * * Days (32 bits) since the UNIX epoch
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Date {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Date
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Date {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Date= obj
- * @returns Date
- */
-static getRootAsDate(bb:flatbuffers.ByteBuffer, obj?:Date):Date {
- return (obj || new Date()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Date= obj
- * @returns Date
- */
-static getSizePrefixedRootAsDate(bb:flatbuffers.ByteBuffer, obj?:Date):Date {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Date()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.DateUnit
- */
-unit():org.apache.arrow.flatbuf.DateUnit {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.DateUnit.MILLISECOND;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startDate(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.DateUnit unit
- */
-static addUnit(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.DateUnit) {
- builder.addFieldInt16(0, unit, org.apache.arrow.flatbuf.DateUnit.MILLISECOND);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endDate(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createDate(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.DateUnit):flatbuffers.Offset {
- Date.startDate(builder);
- Date.addUnit(builder, unit);
- return Date.endDate(builder);
-}
-}
-}
-/**
- * Time type. The physical storage type depends on the unit
- * - SECOND and MILLISECOND: 32 bits
- * - MICROSECOND and NANOSECOND: 64 bits
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Time {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Time
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Time {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Time= obj
- * @returns Time
- */
-static getRootAsTime(bb:flatbuffers.ByteBuffer, obj?:Time):Time {
- return (obj || new Time()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Time= obj
- * @returns Time
- */
-static getSizePrefixedRootAsTime(bb:flatbuffers.ByteBuffer, obj?:Time):Time {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Time()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.TimeUnit
- */
-unit():org.apache.arrow.flatbuf.TimeUnit {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.TimeUnit.MILLISECOND;
-};
-
-/**
- * @returns number
- */
-bitWidth():number {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.readInt32(this.bb_pos + offset) : 32;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startTime(builder:flatbuffers.Builder) {
- builder.startObject(2);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.TimeUnit unit
- */
-static addUnit(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.TimeUnit) {
- builder.addFieldInt16(0, unit, org.apache.arrow.flatbuf.TimeUnit.MILLISECOND);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number bitWidth
- */
-static addBitWidth(builder:flatbuffers.Builder, bitWidth:number) {
- builder.addFieldInt32(1, bitWidth, 32);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endTime(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createTime(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.TimeUnit, bitWidth:number):flatbuffers.Offset {
- Time.startTime(builder);
- Time.addUnit(builder, unit);
- Time.addBitWidth(builder, bitWidth);
- return Time.endTime(builder);
-}
-}
-}
-/**
- * Time elapsed from the Unix epoch, 00:00:00.000 on 1 January 1970, excluding
- * leap seconds, as a 64-bit integer. Note that UNIX time does not include
- * leap seconds.
- *
- * Date & time libraries often have multiple different data types for temporal
- * data. In order to ease interoperability between different implementations the
- * Arrow project has some recommendations for encoding these types into a Timestamp
- * column.
- *
- * An "instant" represents a single moment in time that has no meaningful time zone
- * or the time zone is unknown. A column of instants can also contain values from
- * multiple time zones. To encode an instant set the timezone string to "UTC".
- *
- * A "zoned date-time" represents a single moment in time that has a meaningful
- * reference time zone. To encode a zoned date-time as a Timestamp set the timezone
- * string to the name of the timezone. There is some ambiguity between an instant
- * and a zoned date-time with the UTC time zone. Both of these are stored the same.
- * Typically, this distinction does not matter. If it does, then an application should
- * use custom metadata or an extension type to distinguish between the two cases.
- *
- * An "offset date-time" represents a single moment in time combined with a meaningful
- * offset from UTC. To encode an offset date-time as a Timestamp set the timezone string
- * to the numeric time zone offset string (e.g. "+03:00").
- *
- * A "local date-time" does not represent a single moment in time. It represents a wall
- * clock time combined with a date. Because of daylight saving time there may be multiple
- * instants that correspond to a single local date-time in any given time zone. A
- * local date-time is often stored as a struct or a Date32/Time64 pair. However, it can
- * also be encoded into a Timestamp column. To do so, the value should be the time
- * elapsed from the Unix epoch so that a wall clock in UTC would display the desired time.
- * The timezone string should be set to null or the empty string.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Timestamp {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Timestamp
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Timestamp {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Timestamp= obj
- * @returns Timestamp
- */
-static getRootAsTimestamp(bb:flatbuffers.ByteBuffer, obj?:Timestamp):Timestamp {
- return (obj || new Timestamp()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Timestamp= obj
- * @returns Timestamp
- */
-static getSizePrefixedRootAsTimestamp(bb:flatbuffers.ByteBuffer, obj?:Timestamp):Timestamp {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Timestamp()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.TimeUnit
- */
-unit():org.apache.arrow.flatbuf.TimeUnit {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.TimeUnit.SECOND;
-};
-
-/**
- * The time zone is a string indicating the name of a time zone, one of:
- *
- * * As used in the Olson time zone database (the "tz database" or
- * "tzdata"), such as "America/New_York"
- * * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
- *
- * Whether a timezone string is present indicates different semantics about
- * the data:
- *
- * * If the time zone is null or an empty string, the data is a local date-time
- * and does not represent a single moment in time. Instead it represents a wall clock
- * time and care should be taken to avoid interpreting it semantically as an instant.
- *
- * * If the time zone is set to a valid value, values can be displayed as
- * "localized" to that time zone, even though the underlying 64-bit
- * integers are identical to the same data stored in UTC. Converting
- * between time zones is a metadata-only operation and does not change the
- * underlying values
- *
- * @param flatbuffers.Encoding= optionalEncoding
- * @returns string|Uint8Array|null
- */
-timezone():string|null
-timezone(optionalEncoding:flatbuffers.Encoding):string|Uint8Array|null
-timezone(optionalEncoding?:any):string|Uint8Array|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.__string(this.bb_pos + offset, optionalEncoding) : null;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startTimestamp(builder:flatbuffers.Builder) {
- builder.startObject(2);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.TimeUnit unit
- */
-static addUnit(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.TimeUnit) {
- builder.addFieldInt16(0, unit, org.apache.arrow.flatbuf.TimeUnit.SECOND);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset timezoneOffset
- */
-static addTimezone(builder:flatbuffers.Builder, timezoneOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, timezoneOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endTimestamp(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createTimestamp(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.TimeUnit, timezoneOffset:flatbuffers.Offset):flatbuffers.Offset {
- Timestamp.startTimestamp(builder);
- Timestamp.addUnit(builder, unit);
- Timestamp.addTimezone(builder, timezoneOffset);
- return Timestamp.endTimestamp(builder);
-}
-}
-}
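The Timestamp semantics spelled out in the comment above reduce to how the timezone string is set; a sketch, again assuming a flatbuffers.Builder named `builder` as in the earlier sketches:

// Sketch: an "instant" column (timezone set to "UTC") versus a "local date-time"
// column (no timezone string; the default 0 offset leaves the field unset).
const utc = builder.createString('UTC');
const instantType = org.apache.arrow.flatbuf.Timestamp.createTimestamp(
    builder, org.apache.arrow.flatbuf.TimeUnit.NANOSECOND, utc);
const localDateTimeType = org.apache.arrow.flatbuf.Timestamp.createTimestamp(
    builder, org.apache.arrow.flatbuf.TimeUnit.NANOSECOND, 0);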
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Interval {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Interval
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Interval {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Interval= obj
- * @returns Interval
- */
-static getRootAsInterval(bb:flatbuffers.ByteBuffer, obj?:Interval):Interval {
- return (obj || new Interval()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Interval= obj
- * @returns Interval
- */
-static getSizePrefixedRootAsInterval(bb:flatbuffers.ByteBuffer, obj?:Interval):Interval {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Interval()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.IntervalUnit
- */
-unit():org.apache.arrow.flatbuf.IntervalUnit {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.IntervalUnit.YEAR_MONTH;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startInterval(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.IntervalUnit unit
- */
-static addUnit(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.IntervalUnit) {
- builder.addFieldInt16(0, unit, org.apache.arrow.flatbuf.IntervalUnit.YEAR_MONTH);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endInterval(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createInterval(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.IntervalUnit):flatbuffers.Offset {
- Interval.startInterval(builder);
- Interval.addUnit(builder, unit);
- return Interval.endInterval(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Duration {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Duration
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Duration {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Duration= obj
- * @returns Duration
- */
-static getRootAsDuration(bb:flatbuffers.ByteBuffer, obj?:Duration):Duration {
- return (obj || new Duration()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Duration= obj
- * @returns Duration
- */
-static getSizePrefixedRootAsDuration(bb:flatbuffers.ByteBuffer, obj?:Duration):Duration {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Duration()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.TimeUnit
- */
-unit():org.apache.arrow.flatbuf.TimeUnit {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.TimeUnit.MILLISECOND;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startDuration(builder:flatbuffers.Builder) {
- builder.startObject(1);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.TimeUnit unit
- */
-static addUnit(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.TimeUnit) {
- builder.addFieldInt16(0, unit, org.apache.arrow.flatbuf.TimeUnit.MILLISECOND);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endDuration(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createDuration(builder:flatbuffers.Builder, unit:org.apache.arrow.flatbuf.TimeUnit):flatbuffers.Offset {
- Duration.startDuration(builder);
- Duration.addUnit(builder, unit);
- return Duration.endDuration(builder);
-}
-}
-}
-/**
- * ----------------------------------------------------------------------
- * user defined key value pairs to add custom metadata to arrow
- * key namespacing is the responsibility of the user
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class KeyValue {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns KeyValue
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):KeyValue {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param KeyValue= obj
- * @returns KeyValue
- */
-static getRootAsKeyValue(bb:flatbuffers.ByteBuffer, obj?:KeyValue):KeyValue {
- return (obj || new KeyValue()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param KeyValue= obj
- * @returns KeyValue
- */
-static getSizePrefixedRootAsKeyValue(bb:flatbuffers.ByteBuffer, obj?:KeyValue):KeyValue {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new KeyValue()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.Encoding= optionalEncoding
- * @returns string|Uint8Array|null
- */
-key():string|null
-key(optionalEncoding:flatbuffers.Encoding):string|Uint8Array|null
-key(optionalEncoding?:any):string|Uint8Array|null {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.__string(this.bb_pos + offset, optionalEncoding) : null;
-};
-
-/**
- * @param flatbuffers.Encoding= optionalEncoding
- * @returns string|Uint8Array|null
- */
-value():string|null
-value(optionalEncoding:flatbuffers.Encoding):string|Uint8Array|null
-value(optionalEncoding?:any):string|Uint8Array|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.__string(this.bb_pos + offset, optionalEncoding) : null;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startKeyValue(builder:flatbuffers.Builder) {
- builder.startObject(2);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset keyOffset
- */
-static addKey(builder:flatbuffers.Builder, keyOffset:flatbuffers.Offset) {
- builder.addFieldOffset(0, keyOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset valueOffset
- */
-static addValue(builder:flatbuffers.Builder, valueOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, valueOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endKeyValue(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-static createKeyValue(builder:flatbuffers.Builder, keyOffset:flatbuffers.Offset, valueOffset:flatbuffers.Offset):flatbuffers.Offset {
- KeyValue.startKeyValue(builder);
- KeyValue.addKey(builder, keyOffset);
- KeyValue.addValue(builder, valueOffset);
- return KeyValue.endKeyValue(builder);
-}
-}
-}
-/**
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class DictionaryEncoding {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns DictionaryEncoding
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):DictionaryEncoding {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param DictionaryEncoding= obj
- * @returns DictionaryEncoding
- */
-static getRootAsDictionaryEncoding(bb:flatbuffers.ByteBuffer, obj?:DictionaryEncoding):DictionaryEncoding {
- return (obj || new DictionaryEncoding()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param DictionaryEncoding= obj
- * @returns DictionaryEncoding
- */
-static getSizePrefixedRootAsDictionaryEncoding(bb:flatbuffers.ByteBuffer, obj?:DictionaryEncoding):DictionaryEncoding {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new DictionaryEncoding()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * The known dictionary id in the application where this data is used. In
- * the file or streaming formats, the dictionary ids are found in the
- * DictionaryBatch messages
- *
- * @returns flatbuffers.Long
- */
-id():flatbuffers.Long {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.readInt64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
-};
-
-/**
- * The dictionary indices are constrained to be non-negative integers. If
- * this field is null, the indices must be signed int32. To maximize
- * cross-language compatibility and performance, implementations are
- * recommended to prefer signed integer types over unsigned integer types
- * and to avoid uint64 indices unless they are required by an application.
- *
- * @param org.apache.arrow.flatbuf.Int= obj
- * @returns org.apache.arrow.flatbuf.Int|null
- */
-indexType(obj?:org.apache.arrow.flatbuf.Int):org.apache.arrow.flatbuf.Int|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? (obj || new org.apache.arrow.flatbuf.Int()).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
-};
-
-/**
- * By default, dictionaries are not ordered, or the order does not have
- * semantic meaning. In some statistical, applications, dictionary-encoding
- * is used to represent ordered categorical data, and we provide a way to
- * preserve that metadata here
- *
- * @returns boolean
- */
-isOrdered():boolean {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? !!this.bb!.readInt8(this.bb_pos + offset) : false;
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.DictionaryKind
- */
-dictionaryKind():org.apache.arrow.flatbuf.DictionaryKind {
- var offset = this.bb!.__offset(this.bb_pos, 10);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.DictionaryKind.DenseArray;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startDictionaryEncoding(builder:flatbuffers.Builder) {
- builder.startObject(4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Long id
- */
-static addId(builder:flatbuffers.Builder, id:flatbuffers.Long) {
- builder.addFieldInt64(0, id, builder.createLong(0, 0));
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset indexTypeOffset
- */
-static addIndexType(builder:flatbuffers.Builder, indexTypeOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, indexTypeOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param boolean isOrdered
- */
-static addIsOrdered(builder:flatbuffers.Builder, isOrdered:boolean) {
- builder.addFieldInt8(2, +isOrdered, +false);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.DictionaryKind dictionaryKind
- */
-static addDictionaryKind(builder:flatbuffers.Builder, dictionaryKind:org.apache.arrow.flatbuf.DictionaryKind) {
- builder.addFieldInt16(3, dictionaryKind, org.apache.arrow.flatbuf.DictionaryKind.DenseArray);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endDictionaryEncoding(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-}
-}
-/**
- * ----------------------------------------------------------------------
- * A field represents a named column in a record / row batch or child of a
- * nested type.
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Field {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Field
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Field {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Field= obj
- * @returns Field
- */
-static getRootAsField(bb:flatbuffers.ByteBuffer, obj?:Field):Field {
- return (obj || new Field()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Field= obj
- * @returns Field
- */
-static getSizePrefixedRootAsField(bb:flatbuffers.ByteBuffer, obj?:Field):Field {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Field()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * Name is not required, in i.e. a List
- *
- * @param flatbuffers.Encoding= optionalEncoding
- * @returns string|Uint8Array|null
- */
-name():string|null
-name(optionalEncoding:flatbuffers.Encoding):string|Uint8Array|null
-name(optionalEncoding?:any):string|Uint8Array|null {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? this.bb!.__string(this.bb_pos + offset, optionalEncoding) : null;
-};
-
-/**
- * Whether or not this field can contain nulls. Should be true in general.
- *
- * @returns boolean
- */
-nullable():boolean {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? !!this.bb!.readInt8(this.bb_pos + offset) : false;
-};
-
-/**
- * @returns org.apache.arrow.flatbuf.Type
- */
-typeType():org.apache.arrow.flatbuf.Type {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? /** */ (this.bb!.readUint8(this.bb_pos + offset)) : org.apache.arrow.flatbuf.Type.NONE;
-};
-
-/**
- * This is the type of the decoded value if the field is dictionary encoded.
- *
- * @param flatbuffers.Table obj
- * @returns ?flatbuffers.Table
- */
-type<T extends flatbuffers.Table>(obj:T):T|null {
- var offset = this.bb!.__offset(this.bb_pos, 10);
- return offset ? this.bb!.__union(obj, this.bb_pos + offset) : null;
-};
-
-/**
- * Present only if the field is dictionary encoded.
- *
- * @param org.apache.arrow.flatbuf.DictionaryEncoding= obj
- * @returns org.apache.arrow.flatbuf.DictionaryEncoding|null
- */
-dictionary(obj?:org.apache.arrow.flatbuf.DictionaryEncoding):org.apache.arrow.flatbuf.DictionaryEncoding|null {
- var offset = this.bb!.__offset(this.bb_pos, 12);
- return offset ? (obj || new org.apache.arrow.flatbuf.DictionaryEncoding()).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
-};
-
-/**
- * children apply only to nested data types like Struct, List and Union. For
- * primitive types children will have length 0.
- *
- * @param number index
- * @param org.apache.arrow.flatbuf.Field= obj
- * @returns org.apache.arrow.flatbuf.Field
- */
-children(index: number, obj?:org.apache.arrow.flatbuf.Field):org.apache.arrow.flatbuf.Field|null {
- var offset = this.bb!.__offset(this.bb_pos, 14);
- return offset ? (obj || new org.apache.arrow.flatbuf.Field()).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-childrenLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 14);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * User-defined metadata
- *
- * @param number index
- * @param org.apache.arrow.flatbuf.KeyValue= obj
- * @returns org.apache.arrow.flatbuf.KeyValue
- */
-customMetadata(index: number, obj?:org.apache.arrow.flatbuf.KeyValue):org.apache.arrow.flatbuf.KeyValue|null {
- var offset = this.bb!.__offset(this.bb_pos, 16);
- return offset ? (obj || new org.apache.arrow.flatbuf.KeyValue()).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-customMetadataLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 16);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startField(builder:flatbuffers.Builder) {
- builder.startObject(7);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset nameOffset
- */
-static addName(builder:flatbuffers.Builder, nameOffset:flatbuffers.Offset) {
- builder.addFieldOffset(0, nameOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param boolean nullable
- */
-static addNullable(builder:flatbuffers.Builder, nullable:boolean) {
- builder.addFieldInt8(1, +nullable, +false);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.Type typeType
- */
-static addTypeType(builder:flatbuffers.Builder, typeType:org.apache.arrow.flatbuf.Type) {
- builder.addFieldInt8(2, typeType, org.apache.arrow.flatbuf.Type.NONE);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset typeOffset
- */
-static addType(builder:flatbuffers.Builder, typeOffset:flatbuffers.Offset) {
- builder.addFieldOffset(3, typeOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset dictionaryOffset
- */
-static addDictionary(builder:flatbuffers.Builder, dictionaryOffset:flatbuffers.Offset) {
- builder.addFieldOffset(4, dictionaryOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset childrenOffset
- */
-static addChildren(builder:flatbuffers.Builder, childrenOffset:flatbuffers.Offset) {
- builder.addFieldOffset(5, childrenOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array.<flatbuffers.Offset> data
- * @returns flatbuffers.Offset
- */
-static createChildrenVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
- builder.startVector(4, data.length, 4);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addOffset(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startChildrenVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(4, numElems, 4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset customMetadataOffset
- */
-static addCustomMetadata(builder:flatbuffers.Builder, customMetadataOffset:flatbuffers.Offset) {
- builder.addFieldOffset(6, customMetadataOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array.<flatbuffers.Offset> data
- * @returns flatbuffers.Offset
- */
-static createCustomMetadataVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
- builder.startVector(4, data.length, 4);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addOffset(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startCustomMetadataVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(4, numElems, 4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endField(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-}
-}
-/**
- * ----------------------------------------------------------------------
- * A Buffer represents a single contiguous memory segment
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Buffer {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Buffer
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Buffer {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * The relative offset into the shared memory page where the bytes for this
- * buffer starts
- *
- * @returns flatbuffers.Long
- */
-offset():flatbuffers.Long {
- return this.bb!.readInt64(this.bb_pos);
-};
-
-/**
- * The absolute length (in bytes) of the memory buffer. The memory is found
- * from offset (inclusive) to offset + length (non-inclusive). When building
- * messages using the encapsulated IPC message, padding bytes may be written
- * after a buffer, but such padding bytes do not need to be accounted for in
- * the size here.
- *
- * @returns flatbuffers.Long
- */
-length():flatbuffers.Long {
- return this.bb!.readInt64(this.bb_pos + 8);
-};
-
-/**
- * @returns number
- */
-static sizeOf():number {
- return 16;
-}
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Long offset
- * @param flatbuffers.Long length
- * @returns flatbuffers.Offset
- */
-static createBuffer(builder:flatbuffers.Builder, offset: flatbuffers.Long, length: flatbuffers.Long):flatbuffers.Offset {
- builder.prep(8, 16);
- builder.writeInt64(length);
- builder.writeInt64(offset);
- return builder.offset();
-};
-
-}
-}
-/**
- * ----------------------------------------------------------------------
- * A Schema describes the columns in a row batch
- *
- * @constructor
- */
-export namespace org.apache.arrow.flatbuf{
-export class Schema {
- bb: flatbuffers.ByteBuffer|null = null;
-
- bb_pos:number = 0;
-/**
- * @param number i
- * @param flatbuffers.ByteBuffer bb
- * @returns Schema
- */
-__init(i:number, bb:flatbuffers.ByteBuffer):Schema {
- this.bb_pos = i;
- this.bb = bb;
- return this;
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Schema= obj
- * @returns Schema
- */
-static getRootAsSchema(bb:flatbuffers.ByteBuffer, obj?:Schema):Schema {
- return (obj || new Schema()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * @param flatbuffers.ByteBuffer bb
- * @param Schema= obj
- * @returns Schema
- */
-static getSizePrefixedRootAsSchema(bb:flatbuffers.ByteBuffer, obj?:Schema):Schema {
- bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH);
- return (obj || new Schema()).__init(bb.readInt32(bb.position()) + bb.position(), bb);
-};
-
-/**
- * endianness of the buffer
- * it is Little Endian by default
- * if endianness doesn't match the underlying system then the vectors need to be converted
- *
- * @returns org.apache.arrow.flatbuf.Endianness
- */
-endianness():org.apache.arrow.flatbuf.Endianness {
- var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? /** */ (this.bb!.readInt16(this.bb_pos + offset)) : org.apache.arrow.flatbuf.Endianness.Little;
-};
-
-/**
- * @param number index
- * @param org.apache.arrow.flatbuf.Field= obj
- * @returns org.apache.arrow.flatbuf.Field
- */
-fields(index: number, obj?:org.apache.arrow.flatbuf.Field):org.apache.arrow.flatbuf.Field|null {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? (obj || new org.apache.arrow.flatbuf.Field()).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-fieldsLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * @param number index
- * @param org.apache.arrow.flatbuf.KeyValue= obj
- * @returns org.apache.arrow.flatbuf.KeyValue
- */
-customMetadata(index: number, obj?:org.apache.arrow.flatbuf.KeyValue):org.apache.arrow.flatbuf.KeyValue|null {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? (obj || new org.apache.arrow.flatbuf.KeyValue()).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
-};
-
-/**
- * @returns number
- */
-customMetadataLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * Features used in the stream/file.
- *
- * @param number index
- * @returns flatbuffers.Long
- */
-features(index: number):flatbuffers.Long|null {
- var offset = this.bb!.__offset(this.bb_pos, 10);
- return offset ? /** */ (this.bb!.readInt64(this.bb!.__vector(this.bb_pos + offset) + index * 8)) : this.bb!.createLong(0, 0);
-};
-
-/**
- * @returns number
- */
-featuresLength():number {
- var offset = this.bb!.__offset(this.bb_pos, 10);
- return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
-};
-
-/**
- * @param flatbuffers.Builder builder
- */
-static startSchema(builder:flatbuffers.Builder) {
- builder.startObject(4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param org.apache.arrow.flatbuf.Endianness endianness
- */
-static addEndianness(builder:flatbuffers.Builder, endianness:org.apache.arrow.flatbuf.Endianness) {
- builder.addFieldInt16(0, endianness, org.apache.arrow.flatbuf.Endianness.Little);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset fieldsOffset
- */
-static addFields(builder:flatbuffers.Builder, fieldsOffset:flatbuffers.Offset) {
- builder.addFieldOffset(1, fieldsOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array.<flatbuffers.Offset> data
- * @returns flatbuffers.Offset
- */
-static createFieldsVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
- builder.startVector(4, data.length, 4);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addOffset(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startFieldsVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(4, numElems, 4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset customMetadataOffset
- */
-static addCustomMetadata(builder:flatbuffers.Builder, customMetadataOffset:flatbuffers.Offset) {
- builder.addFieldOffset(2, customMetadataOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array.<flatbuffers.Offset> data
- * @returns flatbuffers.Offset
- */
-static createCustomMetadataVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
- builder.startVector(4, data.length, 4);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addOffset(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startCustomMetadataVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(4, numElems, 4);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset featuresOffset
- */
-static addFeatures(builder:flatbuffers.Builder, featuresOffset:flatbuffers.Offset) {
- builder.addFieldOffset(3, featuresOffset, 0);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param Array.<flatbuffers.Long> data
- * @returns flatbuffers.Offset
- */
-static createFeaturesVector(builder:flatbuffers.Builder, data:flatbuffers.Long[]):flatbuffers.Offset {
- builder.startVector(8, data.length, 8);
- for (var i = data.length - 1; i >= 0; i--) {
- builder.addInt64(data[i]);
- }
- return builder.endVector();
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param number numElems
- */
-static startFeaturesVector(builder:flatbuffers.Builder, numElems:number) {
- builder.startVector(8, numElems, 8);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @returns flatbuffers.Offset
- */
-static endSchema(builder:flatbuffers.Builder):flatbuffers.Offset {
- var offset = builder.endObject();
- return offset;
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset offset
- */
-static finishSchemaBuffer(builder:flatbuffers.Builder, offset:flatbuffers.Offset) {
- builder.finish(offset);
-};
-
-/**
- * @param flatbuffers.Builder builder
- * @param flatbuffers.Offset offset
- */
-static finishSizePrefixedSchemaBuffer(builder:flatbuffers.Builder, offset:flatbuffers.Offset) {
- builder.finish(offset, undefined, true);
-};
-
-static createSchema(builder:flatbuffers.Builder, endianness:org.apache.arrow.flatbuf.Endianness, fieldsOffset:flatbuffers.Offset, customMetadataOffset:flatbuffers.Offset, featuresOffset:flatbuffers.Offset):flatbuffers.Offset {
- Schema.startSchema(builder);
- Schema.addEndianness(builder, endianness);
- Schema.addFields(builder, fieldsOffset);
- Schema.addCustomMetadata(builder, customMetadataOffset);
- Schema.addFeatures(builder, featuresOffset);
- return Schema.endSchema(builder);
-}
-}
-}
diff --git a/proto/raw-js-openapi/src/index.js b/proto/raw-js-openapi/src/index.js
index 1e9c688f910..4e2312ce3aa 100644
--- a/proto/raw-js-openapi/src/index.js
+++ b/proto/raw-js-openapi/src/index.js
@@ -28,11 +28,6 @@ var browserHeaders = require("browser-headers");
var grpcWeb = require("@improbable-eng/grpc-web");//usually .grpc
var jspb = require("google-protobuf");
-var flatbuffers = require("flatbuffers").flatbuffers;
-var barrage = require("@deephaven/barrage");
-
-var message = require('./arrow/flight/flatbuf/Message_generated');
-var schema = require('./arrow/flight/flatbuf/Schema_generated');
var io = { deephaven: {
proto: {
@@ -58,17 +53,8 @@ var io = { deephaven: {
hierarchicaltable_pb,
hierarchicaltable_pb_service
},
- barrage: {
- "flatbuf": {
- "Barrage_generated": barrage,
- }
- }
}};
var arrow = { flight: {
- flatbuf: {
- Message_generated: message,
- Schema_generated: schema,
- },
protocol: {
Flight_pb,
Flight_pb_service,
@@ -80,7 +66,6 @@ var dhinternal = {
browserHeaders,
jspb,
grpcWeb,//TODO need to expand this to the specific things we need
- flatbuffers,
io,
arrow
};
diff --git a/replication/static/src/main/java/io/deephaven/replicators/ReplicateBarrageUtils.java b/replication/static/src/main/java/io/deephaven/replicators/ReplicateBarrageUtils.java
index 6824f8d91f9..35dadff92d0 100644
--- a/replication/static/src/main/java/io/deephaven/replicators/ReplicateBarrageUtils.java
+++ b/replication/static/src/main/java/io/deephaven/replicators/ReplicateBarrageUtils.java
@@ -36,6 +36,9 @@ public static void main(final String[] args) throws IOException {
fixupVectorExpansionKernel(CHUNK_PACKAGE + "/vector/IntVectorExpansionKernel.java", "Int");
fixupVectorExpansionKernel(CHUNK_PACKAGE + "/vector/LongVectorExpansionKernel.java", "Long");
fixupVectorExpansionKernel(CHUNK_PACKAGE + "/vector/DoubleVectorExpansionKernel.java", "Double");
+
+ ReplicatePrimitiveCode.charToAllButBoolean("replicateBarrageUtils",
+ "web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebCharColumnData.java");
}
private static void fixupVectorExpansionKernel(final @NotNull String path, final @NotNull String type)
diff --git a/web/client-api/client-api.gradle b/web/client-api/client-api.gradle
index f9f9067a65b..93f8930f32f 100644
--- a/web/client-api/client-api.gradle
+++ b/web/client-api/client-api.gradle
@@ -15,15 +15,32 @@ configurations {
dts
typescriptDoclet
}
-
dependencies {
+ implementation platform(libs.grpc.bom)
+ implementation variantOf(libs.grpc.api) { classifier('sources') }
+
+ implementation project(':engine-chunk')
+ implementation project(':extensions-barrage')
+ implementation project(':DataStructures')
implementation project(':web-shared-beans')
implementation project(':web-client-backplane')
+ implementation libs.jetbrains.annotations
+ implementation variantOf(libs.jetbrains.annotations) { classifier("sources") }
+ implementation libs.immutables.value.annotations
+ implementation variantOf(libs.immutables.value.annotations) { classifier("sources") }
+
+ implementation libs.flatbuffers.java
+ implementation variantOf(libs.flatbuffers.java) { classifier("sources") }
+ implementation libs.arrow.format
+ implementation variantOf(libs.arrow.format) { classifier('sources') }
+ implementation libs.deephaven.barrage.format
+ implementation variantOf(libs.deephaven.barrage.format) { classifier('sources') }
implementation libs.vertispan.ts.defs.annotations
typescriptDoclet libs.vertispan.ts.defs.doclet
implementation libs.vertispan.nio.gwt
+ implementation libs.vertispan.flatbuffers.gwt
js project(path: ':proto:raw-js-openapi', configuration: 'js')
@@ -76,10 +93,14 @@ def gwtUnitTest = tasks.register('gwtUnitTest', Test) { t ->
'-runStyle HtmlUnit',
'-ea',
'-style PRETTY',
+ '-generateJsInteropExports',
+ '-includeJsInteropExports io.deephaven.*',
+ '-excludeJsInteropExports io.deephaven.web.client.api.widget.plot.*',
"-war ${layout.buildDirectory.dir('unitTest-war').get().asFile.absolutePath}"
].join(' '),
'gwt.persistentunitcachedir': layout.buildDirectory.dir('unitTest-unitCache').get().asFile.absolutePath,
]
+ t.classpath += tasks.getByName('gwtCompile').src
t.include '**/ClientUnitTestSuite.class'
t.useJUnit()
t.scanForTestClasses = false
@@ -148,6 +169,9 @@ def gwtIntegrationTest = tasks.register('gwtIntegrationTest', Test) { t ->
"-runStyle io.deephaven.web.junit.RunStyleRemoteWebDriver:${webdriverUrl}?firefox",
'-ea',
'-style PRETTY',
+ '-generateJsInteropExports',
+ '-includeJsInteropExports io.deephaven.*',
+ '-excludeJsInteropExports io.deephaven.web.client.api.widget.plot.*',
"-setProperty dh.server=http://${deephavenDocker.containerName.get()}:10000",
"-war ${layout.buildDirectory.dir('integrationTest-war').get().asFile.absolutePath}"
].join(' '))
@@ -181,6 +205,9 @@ Click the URL that is printed out to run the test in your browser, or refresh an
'-runStyle Manual:1',
'-ea',
'-style PRETTY',
+ '-generateJsInteropExports',
+ '-includeJsInteropExports io.deephaven.*',
+ '-excludeJsInteropExports io.deephaven.web.client.api.widget.plot.*',
'-setProperty dh.server=http://localhost:10000',
'-setProperty compiler.useSourceMaps=true',
"-war ${layout.buildDirectory.dir('manualTest-war').get().asFile.absolutePath}"
diff --git a/web/client-api/src/main/java/io/deephaven/web/DeephavenApi.gwt.xml b/web/client-api/src/main/java/io/deephaven/web/DeephavenApi.gwt.xml
index 4f6177621b8..9649d1d487c 100644
--- a/web/client-api/src/main/java/io/deephaven/web/DeephavenApi.gwt.xml
+++ b/web/client-api/src/main/java/io/deephaven/web/DeephavenApi.gwt.xml
@@ -2,8 +2,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/Column.java b/web/client-api/src/main/java/io/deephaven/web/client/api/Column.java
index 4e4840754dd..00e97223b86 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/Column.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/Column.java
@@ -10,6 +10,7 @@
import jsinterop.annotations.JsProperty;
import jsinterop.base.Any;
+import java.util.Objects;
import java.util.stream.IntStream;
import java.util.stream.IntStream.Builder;
@@ -21,7 +22,6 @@
public class Column {
private final int index;
- private final Integer formatColumnIndex;
private final Integer styleColumnIndex;
private final Integer formatStringColumnIndex;
@@ -75,7 +75,7 @@ public Column(int jsIndex, int index, Integer formatColumnIndex, Integer styleCo
boolean inputTableKeyColumn, boolean isSortable) {
this.jsIndex = jsIndex;
this.index = index;
- this.formatColumnIndex = formatColumnIndex;
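+        // formatColumnIndex must mirror styleColumnIndex; only styleColumnIndex is retained on this class.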
+ assert Objects.equals(formatColumnIndex, styleColumnIndex);
this.styleColumnIndex = styleColumnIndex;
this.type = type;
this.name = name;
@@ -170,14 +170,6 @@ public void setConstituentType(final String constituentType) {
this.constituentType = constituentType;
}
- /**
- * @deprecated Prefer {@link #getFormatStringColumnIndex()}.
- */
- @Deprecated
- public Integer getFormatColumnIndex() {
- return formatColumnIndex;
- }
-
public Integer getFormatStringColumnIndex() {
return formatStringColumnIndex;
}
@@ -266,7 +258,6 @@ public CustomColumn formatDate(String expression) {
public String toString() {
return "Column{" +
"index=" + index +
- ", formatColumnIndex=" + formatColumnIndex +
", styleColumnIndex=" + styleColumnIndex +
", formatStringColumnIndex=" + formatStringColumnIndex +
", type='" + type + '\'' +
@@ -285,9 +276,6 @@ public boolean equals(Object o) {
if (index != column.index)
return false;
- if (formatColumnIndex != null ? !formatColumnIndex.equals(column.formatColumnIndex)
- : column.formatColumnIndex != null)
- return false;
if (styleColumnIndex != null ? !styleColumnIndex.equals(column.styleColumnIndex)
: column.styleColumnIndex != null)
return false;
@@ -302,7 +290,6 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
int result = index;
- result = 31 * result + (formatColumnIndex != null ? formatColumnIndex.hashCode() : 0);
result = 31 * result + (styleColumnIndex != null ? styleColumnIndex.hashCode() : 0);
result = 31 * result + (formatStringColumnIndex != null ? formatStringColumnIndex.hashCode() : 0);
result = 31 * result + type.hashCode();
@@ -311,12 +298,12 @@ public int hashCode() {
}
public Column withFormatStringColumnIndex(int formatStringColumnIndex) {
- return new Column(jsIndex, index, formatColumnIndex, styleColumnIndex, type, name, isPartitionColumn,
+ return new Column(jsIndex, index, styleColumnIndex, styleColumnIndex, type, name, isPartitionColumn,
formatStringColumnIndex, description, isInputTableKeyColumn, isSortable);
}
public Column withStyleColumnIndex(int styleColumnIndex) {
- return new Column(jsIndex, index, formatColumnIndex, styleColumnIndex, type, name, isPartitionColumn,
+ return new Column(jsIndex, index, styleColumnIndex, styleColumnIndex, type, name, isPartitionColumn,
formatStringColumnIndex, description, isInputTableKeyColumn, isSortable);
}
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/DateWrapper.java b/web/client-api/src/main/java/io/deephaven/web/client/api/DateWrapper.java
index 4e6580b57c0..f53d2a35928 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/DateWrapper.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/DateWrapper.java
@@ -4,6 +4,7 @@
package io.deephaven.web.client.api;
import elemental2.core.JsDate;
+import io.deephaven.util.QueryConstants;
import io.deephaven.web.client.api.i18n.JsDateTimeFormat;
import jsinterop.annotations.JsIgnore;
import jsinterop.annotations.JsType;
@@ -17,6 +18,9 @@ public DateWrapper(long valueInNanos) {
@JsIgnore
public static DateWrapper of(long dateInNanos) {
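+        // NULL_LONG is Deephaven's null sentinel for long values; surface it to JS as null rather than a bogus date.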
+ if (dateInNanos == QueryConstants.NULL_LONG) {
+ return null;
+ }
return new DateWrapper(dateInNanos);
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/Format.java b/web/client-api/src/main/java/io/deephaven/web/client/api/Format.java
index 747a3402b39..2aea9065a9c 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/Format.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/Format.java
@@ -10,12 +10,16 @@
import jsinterop.annotations.JsNullable;
import jsinterop.annotations.JsProperty;
+import java.util.Objects;
+
/**
* This object may be pooled internally or discarded and not updated. Do not retain references to it.
*/
@TsInterface
@TsName(namespace = "dh")
-public class Format {
+public final class Format {
+ public static final Format EMPTY = new Format(0, 0, null, null);
+
private final long cellColors;
private final long rowColors;
@@ -23,8 +27,8 @@ public class Format {
private final String formatString;
public Format(long cellColors, long rowColors, String numberFormat, String formatString) {
- this.cellColors = cellColors;
- this.rowColors = rowColors;
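+        // Long.MIN_VALUE is the long null sentinel for packed colors; treat it the same as 0 (no colors set).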
+ this.cellColors = cellColors == Long.MIN_VALUE ? 0 : cellColors;
+ this.rowColors = rowColors == Long.MIN_VALUE ? 0 : rowColors;
this.numberFormat = numberFormat;
this.formatString = formatString;
}
@@ -103,4 +107,30 @@ public String getFormatString() {
return formatString;
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+ Format format = (Format) o;
+ return cellColors == format.cellColors && rowColors == format.rowColors
+ && Objects.equals(numberFormat, format.numberFormat)
+ && Objects.equals(formatString, format.formatString);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(cellColors, rowColors, numberFormat, formatString);
+ }
+
+ @Override
+ public String toString() {
+ return "Format{" +
+ "cellColors=" + cellColors +
+ ", rowColors=" + rowColors +
+ ", numberFormat='" + numberFormat + '\'' +
+ ", formatString='" + formatString + '\'' +
+ '}';
+ }
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsColumnStatistics.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsColumnStatistics.java
index 7b117047e80..d66034f67d4 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsColumnStatistics.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsColumnStatistics.java
@@ -8,7 +8,6 @@
import com.vertispan.tsdefs.annotations.TsName;
import elemental2.core.JsArray;
import elemental2.core.JsMap;
-import io.deephaven.web.shared.data.ColumnStatistics;
import jsinterop.annotations.JsIgnore;
import jsinterop.annotations.JsMethod;
import jsinterop.annotations.JsProperty;
@@ -18,8 +17,7 @@
import java.util.Map;
/**
- * Javascript wrapper for {@link ColumnStatistics} This class holds the results of a call to generate statistics on a
- * table column.
+ * Represents statistics for a given table column.
*/
@TsInterface
@TsName(name = "ColumnStatistics", namespace = "dh")
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java
index acc260335e0..aa7aca03f00 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java
@@ -26,7 +26,6 @@
import io.deephaven.web.client.state.ClientTableState;
import io.deephaven.web.shared.data.RangeSet;
import jsinterop.annotations.JsIgnore;
-import jsinterop.annotations.JsMethod;
import jsinterop.annotations.JsNullable;
import jsinterop.annotations.JsProperty;
import jsinterop.annotations.JsType;
@@ -143,11 +142,10 @@ private Promise subscribeToBaseTable() {
private void handleKeys(Event update) {
// noinspection unchecked
-        CustomEvent<SubscriptionTableData.UpdateEventData> event =
-                (CustomEvent<SubscriptionTableData.UpdateEventData>) update;
+        CustomEvent<SubscriptionTableData> event = (CustomEvent<SubscriptionTableData>) update;
// We're only interested in added rows, send an event indicating the new keys that are available
- SubscriptionTableData.UpdateEventData eventData = event.detail;
+ SubscriptionTableData eventData = event.detail;
RangeSet added = eventData.getAdded().getRange();
added.indexIterator().forEachRemaining((long index) -> {
// extract the key to use
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsRangeSet.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsRangeSet.java
index 50e09604c73..85872589483 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsRangeSet.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsRangeSet.java
@@ -24,6 +24,9 @@ public class JsRangeSet {
private final RangeSet range;
public static JsRangeSet ofRange(double first, double last) {
+ if (first > last) {
+ throw new IllegalStateException(first + " > " + last);
+ }
return new JsRangeSet(RangeSet.ofRange((long) first, (long) last));
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsTable.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsTable.java
index 4e380631ad7..21fd3ca9eff 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsTable.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsTable.java
@@ -9,7 +9,6 @@
import com.vertispan.tsdefs.annotations.TsUnionMember;
import elemental2.core.JsArray;
import elemental2.dom.CustomEventInit;
-import elemental2.dom.DomGlobal;
import elemental2.promise.IThenable.ThenOnFulfilledCallbackFn;
import elemental2.promise.Promise;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.hierarchicaltable_pb.RollupRequest;
@@ -38,27 +37,22 @@
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.table_pb.runchartdownsamplerequest.ZoomRange;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.Ticket;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.TypedTicket;
-import io.deephaven.web.client.api.barrage.def.ColumnDefinition;
import io.deephaven.web.client.api.barrage.def.TableAttributesDefinition;
import io.deephaven.web.client.api.barrage.stream.ResponseStreamWrapper;
import io.deephaven.web.client.api.batch.RequestBatcher;
-import io.deephaven.web.client.api.batch.TableConfig;
import io.deephaven.web.client.api.console.JsVariableType;
import io.deephaven.web.client.api.filter.FilterCondition;
-import io.deephaven.web.client.api.filter.FilterValue;
import io.deephaven.web.client.api.input.JsInputTable;
import io.deephaven.web.client.api.lifecycle.HasLifecycle;
import io.deephaven.web.client.api.state.StateCache;
+import io.deephaven.web.client.api.subscription.AbstractTableSubscription;
import io.deephaven.web.client.api.subscription.TableSubscription;
import io.deephaven.web.client.api.subscription.TableViewportSubscription;
import io.deephaven.web.client.api.subscription.ViewportData;
-import io.deephaven.web.client.api.subscription.ViewportData.MergeResults;
-import io.deephaven.web.client.api.subscription.ViewportRow;
import io.deephaven.web.client.api.tree.JsRollupConfig;
import io.deephaven.web.client.api.tree.JsTreeTable;
import io.deephaven.web.client.api.tree.JsTreeTableConfig;
import io.deephaven.web.client.api.widget.JsWidget;
-import io.deephaven.web.client.fu.JsData;
import io.deephaven.web.client.fu.JsItr;
import io.deephaven.web.client.fu.JsLog;
import io.deephaven.web.client.fu.LazyPromise;
@@ -66,14 +60,11 @@
import io.deephaven.web.client.state.ClientTableState;
import io.deephaven.web.client.state.HasTableBinding;
import io.deephaven.web.shared.data.*;
-import io.deephaven.web.shared.data.TableSnapshot.SnapshotType;
-import io.deephaven.web.shared.data.columns.ColumnData;
import io.deephaven.web.shared.fu.JsConsumer;
import io.deephaven.web.shared.fu.JsProvider;
import io.deephaven.web.shared.fu.JsRunnable;
import io.deephaven.web.shared.fu.RemoverFn;
import javaemul.internal.annotations.DoNotAutobox;
-import jsinterop.annotations.JsIgnore;
import jsinterop.annotations.JsMethod;
import jsinterop.annotations.JsNullable;
import jsinterop.annotations.JsOptional;
@@ -88,7 +79,6 @@
import java.util.*;
import java.util.stream.Stream;
-import static io.deephaven.web.client.api.subscription.ViewportData.NO_ROW_FORMAT_COLUMN;
import static io.deephaven.web.client.fu.LazyPromise.logError;
/**
@@ -155,15 +145,11 @@ public class JsTable extends HasLifecycle implements HasTableBinding, JoinableTa
// change in some table data
INTERNAL_EVENT_SIZELISTENER = "sizelistener-internal";
- // Amount of debounce to use when eating snapshot events.
- public static final int DEBOUNCE_TIME = 20;
public static final int MAX_BATCH_TIME = 600_000;
private final WorkerConnection workerConnection;
-    private Map<TableTicket, TableViewportSubscription> subscriptions = new HashMap<>();
- @Deprecated // TODO refactor this inside of the viewportSubscription type
- private ViewportData currentViewportData;
+    private final Map<TableTicket, TableViewportSubscription> subscriptions = new HashMap<>();
private ClientTableState lastVisibleState;
@@ -181,7 +167,6 @@ public class JsTable extends HasLifecycle implements HasTableBinding, JoinableTa
private final int subscriptionId;
private static int nextSubscriptionId;
- private TableSubscription nonViewportSub;
/**
* Creates a new Table directly from an existing ClientTableState. The CTS manages all fetch operations, so this is
@@ -312,7 +297,9 @@ public boolean isAlive() {
@Override
public ClientTableState state() {
- assert currentState != null : "Table already closed, cannot be used again";
+ if (currentState == null) {
+ throw new IllegalStateException("Table already closed, cannot be used again");
+ }
return currentState;
}
@@ -467,6 +454,7 @@ public double getSize() {
if (isUncoalesced()) {
return JsTable.SIZE_UNCOALESCED;
}
+ // Only return the size from ETUM if we have no other choice
return size;
}
@@ -486,10 +474,9 @@ public String getDescription() {
*/
@JsProperty
public double getTotalSize() {
- TableViewportSubscription subscription = subscriptions.get(getHandle());
- if (subscription != null && subscription.getStatus() == TableViewportSubscription.Status.ACTIVE) {
- // only ask the viewport for the size if it is alive and ticking
- return subscription.totalSize();
+ if (state().getFilters().isEmpty()) {
+ // If there are no filters, use the subscription size (if any)
+ return getSize();
}
return getHeadState().getSize();
}
@@ -683,14 +670,14 @@ public JsArray getCustomColumns() {
* Overload for Java (since JS just omits the optional params)
*/
public TableViewportSubscription setViewport(double firstRow, double lastRow) {
- return setViewport(firstRow, lastRow, null, null);
+ return setViewport(firstRow, lastRow, null, null, null);
}
/**
* Overload for Java (since JS just omits the optional param)
*/
    public TableViewportSubscription setViewport(double firstRow, double lastRow, JsArray<Column> columns) {
- return setViewport(firstRow, lastRow, columns, null);
+ return setViewport(firstRow, lastRow, columns, null, null);
}
/**
@@ -709,13 +696,14 @@ public TableViewportSubscription setViewport(double firstRow, double lastRow, Js
@JsMethod
public TableViewportSubscription setViewport(double firstRow, double lastRow,
            @JsOptional @JsNullable JsArray<Column> columns,
- @JsOptional @JsNullable Double updateIntervalMs) {
- Column[] columnsCopy = columns != null ? Js.uncheckedCast(columns.slice()) : null;
+ @JsOptional @JsNullable Double updateIntervalMs,
+ @JsOptional @JsNullable Boolean isReverseViewport) {
+ Column[] columnsCopy = columns != null ? Js.uncheckedCast(columns.slice()) : state().getColumns();
ClientTableState currentState = state();
TableViewportSubscription activeSubscription = subscriptions.get(getHandle());
if (activeSubscription != null && activeSubscription.getStatus() != TableViewportSubscription.Status.DONE) {
// hasn't finished, lets reuse it
- activeSubscription.setInternalViewport(firstRow, lastRow, columnsCopy, updateIntervalMs);
+ activeSubscription.setInternalViewport(firstRow, lastRow, columnsCopy, updateIntervalMs, isReverseViewport);
return activeSubscription;
} else {
// In the past, we left the old sub going until the new one was ready, then started the new one. But now,
@@ -727,35 +715,23 @@ public TableViewportSubscription setViewport(double firstRow, double lastRow,
// rewrap current state in a new one, when ready the viewport will be applied
TableViewportSubscription replacement =
- new TableViewportSubscription(firstRow, lastRow, columnsCopy, updateIntervalMs, this);
+ TableViewportSubscription.make(firstRow, lastRow, columnsCopy, updateIntervalMs, this);
subscriptions.put(currentState.getHandle(), replacement);
return replacement;
}
}
- public void setInternalViewport(double firstRow, double lastRow, Column[] columns) {
- if (firstRow > lastRow) {
- throw new IllegalArgumentException(firstRow + " > " + lastRow);
- }
- if (firstRow < 0) {
- throw new IllegalArgumentException(firstRow + " < " + 0);
- }
- currentViewportData = null;
- // we must wait for the latest stack entry that can add columns (so we get an appropriate BitSet)
- state().setDesiredViewport(this, (long) firstRow, (long) lastRow, columns);
- }
-
/**
* Gets the currently visible viewport. If the current set of operations has not yet resulted in data, it will not
* resolve until that data is ready. If this table is closed before the promise resolves, it will be rejected - to
* separate the lifespan of this promise from the table itself, call
* {@link TableViewportSubscription#getViewportData()} on the result from {@link #setViewport(double, double)}.
- *
+ *
* @return Promise of {@link TableData}
*/
@JsMethod
- public Promise getViewportData() {
+ public Promise getViewportData() {
TableViewportSubscription subscription = subscriptions.get(getHandle());
if (subscription == null) {
return Promise.reject("No viewport currently set");
@@ -763,20 +739,6 @@ public Promise getViewportData() {
return subscription.getInternalViewportData();
}
- public Promise getInternalViewportData() {
- final LazyPromise promise = new LazyPromise<>();
- final ClientTableState active = state();
- active.onRunning(state -> {
- if (currentViewportData == null) {
- // no viewport data received yet; let's set up a one-shot UPDATED event listener
- addEventListenerOneShot(EVENT_UPDATED, ignored -> promise.succeed(currentViewportData));
- } else {
- promise.succeed(currentViewportData);
- }
- }, promise::fail, () -> promise.fail("Table closed before viewport data was read"));
- return promise.asPromise(MAX_BATCH_TIME);
- }
-
/**
* Overload for java (since js just omits the optional var)
*/
@@ -798,20 +760,9 @@ public TableSubscription subscribe(JsArray columns) {
*/
@JsMethod
    public TableSubscription subscribe(JsArray<Column> columns, @JsOptional Double updateIntervalMs) {
- assert nonViewportSub == null : "Can't directly subscribe to the 'private' table instance";
- // make a new table with a pUT call, listen to the subscription there
return new TableSubscription(columns, this, updateIntervalMs);
}
-    public void internalSubscribe(JsArray<Column> columns, TableSubscription sub) {
- if (columns == null) {
- columns = getColumns();
- }
- this.nonViewportSub = sub;
-
- state().subscribe(this, Js.uncheckedCast(columns));
- }
-
/**
* a new table containing the distinct tuples of values from the given columns that are present in the original
* table. This table can be manipulated as any other table. Sorting is often desired as the default sort is the
@@ -1512,7 +1463,6 @@ public void revive(ClientTableState state) {
unsuppressEvents();
LazyPromise.runLater(() -> {
fireEvent(EVENT_RECONNECT);
- getBinding().maybeReviveSubscription();
});
}
}
@@ -1550,205 +1500,6 @@ public Promise downsample(LongWrapper[] zoomRange, int pixelCount, Stri
.then(state -> Promise.resolve(new JsTable(workerConnection, state)));
}
- private final class Debounce {
- private final ClientTableState state;
- private final TableTicket handle;
- private final SnapshotType type;
- private final RangeSet includedRows;
- private final BitSet columns;
- private final Object[] dataColumns;
- private final double timestamp;
- private final long maxRows;
-
- public Debounce(
- TableTicket table,
- SnapshotType snapshotType,
- RangeSet includedRows,
- BitSet columns,
- Object[] dataColumns,
- long maxRows) {
- this.handle = table;
- this.type = snapshotType;
- this.includedRows = includedRows;
- this.columns = columns;
- this.dataColumns = dataColumns;
- this.state = currentState;
- this.maxRows = maxRows;
- timestamp = System.currentTimeMillis();
- }
-
- public boolean isEqual(Debounce o) {
- if (type == o.type) {
- // this is intentionally weird. We only want to debounce when one instance is column snapshot and the
- // other is row snapshot,
- // so we consider two events of the same type to be incompatible with debouncing.
- return false;
- }
- if (handle != o.handle) {
- assert !handle.equals(o.handle);
- return false;
- }
- if (state != o.state) {
- assert state.getHandle() != o.state.getHandle();
- return false;
- }
- if (!includedRows.equals(o.includedRows)) {
- return false;
- }
- if (!columns.equals(o.columns)) {
- return false;
- }
- if (maxRows != o.maxRows) {
- return false;
- }
- assert Arrays.deepEquals(dataColumns, o.dataColumns) : "Debounce is broken, remove it.";
- return true;
- }
- }
-
- private Debounce debounce;
-
- private void handleSnapshot(TableTicket table, SnapshotType snapshotType, RangeSet includedRows,
- Object[] dataColumns, BitSet columns, long maxRows) {
- assert table.equals(state().getHandle()) : "Table received incorrect snapshot";
- // if the type is initial_snapshot, we've already recorded the size, so only watch for the other two updates.
- // note that this will sometimes result in multiple updates on startup, so we do this ugly debounce-dance.
- // When IDS-2113 is fixed, we can likely remove this code.
- JsLog.debug("Received snapshot for ", table, snapshotType, includedRows, dataColumns, columns);
- Debounce operation = new Debounce(table, snapshotType, includedRows, columns, dataColumns, maxRows);
- if (debounce == null) {
- debounce = operation;
- DomGlobal.setTimeout(ignored -> processSnapshot(), DEBOUNCE_TIME);
- } else if (debounce.isEqual(operation)) {
- // If we think the problem is fixed, we can put `assert false` here for a while before deleting Debounce
- // class
- JsLog.debug("Eating duplicated operation", debounce, operation);
- } else {
- processSnapshot();
- debounce = operation;
- DomGlobal.setTimeout(ignored -> processSnapshot(), DEBOUNCE_TIME);
- }
- }
-
- public void handleSnapshot(TableTicket handle, TableSnapshot snapshot) {
- if (!handle.equals(state().getHandle())) {
- return;
- }
- Viewport viewport = getBinding().getSubscription();
- if (viewport == null || viewport.getRows() == null || viewport.getRows().size() == 0) {
- // check out if we have a non-viewport sub attached
- if (nonViewportSub != null) {
- nonViewportSub.handleSnapshot(snapshot);
- }
- return;
- }
-
- RangeSet viewportRows = viewport.getRows();
- JsLog.debug("handleSnapshot on " + viewportRows, handle, snapshot, viewport);
-
- RangeSet includedRows = snapshot.getIncludedRows();
- ColumnData[] dataColumns = snapshot.getDataColumns();
-        JsArray<Any>[] remappedData = new JsArray[dataColumns.length];
- // remap dataColumns to the expected range for that table's viewport
- long lastRow = -1;
- for (int col = viewport.getColumns().nextSetBit(0); col >= 0; col = viewport.getColumns().nextSetBit(col + 1)) {
- ColumnData dataColumn = dataColumns[col];
- if (dataColumn == null) {
- // skip this, at least one column requested by that table isn't present, waiting on a later update
- // TODO when IDS-2138 is fixed stop throwing this data away
- return;
- }
- Object columnData = dataColumn.getData();
-
- final ColumnDefinition def = state().getTableDef().getColumns()[col];
- remappedData[col] = JsData.newArray(def.getType());
-
- PrimitiveIterator.OfLong viewportIterator = viewportRows.indexIterator();
- PrimitiveIterator.OfLong includedRowsIterator = includedRows.indexIterator();
- int dataIndex = 0;
- while (viewportIterator.hasNext()) {
- long viewportIndex = viewportIterator.nextLong();
- if (viewportIndex >= snapshot.getTableSize()) {
- // reached or passed the end of the table, we'll still make a snapshot
- break;
- }
- if (!includedRowsIterator.hasNext()) {
- // we've reached the end, the viewport apparently goes past the end of what the server sent,
- // so there is another snapshot on its way
- // TODO when IDS-2138 is fixed stop throwing this data away
- return;
- }
-
- long possibleMatch = includedRowsIterator.nextLong();
- while (includedRowsIterator.hasNext() && possibleMatch < viewportIndex) {
- dataIndex++;// skip, still seeking to the next item
-
- possibleMatch = includedRowsIterator.nextLong();
- }
- if (!includedRowsIterator.hasNext() && possibleMatch < viewportIndex) {
- // we didn't find any items which match, just give up
- return;
- }
-
- if (possibleMatch > viewportIndex) {
- // if we hit a gap (more data coming, doesn't match viewport), skip the
- // rest of this table entirely, a later update will get us caught up
- return;
- }
-                Object data = Js.<JsArray<Any>>uncheckedCast(columnData).getAt(dataIndex);
- remappedData[col].push(data);
- dataIndex++;// increment for the next row
-
- // Track how many rows were actually present, allowing the snapshot to stop before the viewport's end
- lastRow = Math.max(lastRow, possibleMatch);
- }
- }
-
- // TODO correct this - assumes max one range per table viewport, and nothing skipped
- RangeSet actualViewport =
- lastRow == -1 ? RangeSet.empty() : RangeSet.ofRange(viewportRows.indexIterator().nextLong(), lastRow);
-
- handleSnapshot(handle, snapshot.getSnapshotType(), actualViewport, remappedData, viewport.getColumns(),
- viewportRows.size());
- }
-
- @JsIgnore
- public void processSnapshot() {
- try {
- if (debounce == null) {
- JsLog.debug("Skipping snapshot b/c debounce is null");
- return;
- }
- if (debounce.state != currentState) {
- JsLog.debug("Skipping snapshot because state has changed ", debounce.state, " != ", currentState);
- return;
- }
- if (isClosed()) {
- JsLog.debug("Skipping snapshot because table is closed", this);
- return;
- }
-            JsArray<Column> viewportColumns =
- getColumns().filter((item, index) -> debounce.columns.get(item.getIndex()));
- ViewportData data = new ViewportData(debounce.includedRows, debounce.dataColumns, viewportColumns,
- currentState.getRowFormatColumn() == null ? NO_ROW_FORMAT_COLUMN
- : currentState.getRowFormatColumn().getIndex(),
- debounce.maxRows);
- this.currentViewportData = data;
- CustomEventInit updatedEvent = CustomEventInit.create();
- updatedEvent.setDetail(data);
- fireEvent(EVENT_UPDATED, updatedEvent);
-
- // also fire rowadded events - TODO also fire some kind of remove event for now-missing rows?
- for (int i = 0; i < data.getRows().length; i++) {
- CustomEventInit addedEvent = CustomEventInit.create();
- addedEvent.setDetail(wrap(data.getRows().getAt(i), i));
- fireEvent(EVENT_ROWADDED, addedEvent);
- }
- } finally {
- debounce = null;
- }
- }
-
/**
* True if this table has been closed.
*
@@ -1788,59 +1539,6 @@ public String getPluginName() {
return lastVisibleState().getTableDef().getAttributes().getPluginName();
}
- // Factored out so that we always apply the same format
- private Object wrap(ViewportRow at, int index) {
- return JsPropertyMap.of("row", at, "index", (double) index);
- }
-
- public void handleDelta(ClientTableState current, DeltaUpdates updates) {
- current.onRunning(s -> {
- if (current != state()) {
- return;
- }
- if (nonViewportSub != null) {
- nonViewportSub.handleDelta(updates);
- return;
- }
- final ViewportData vpd = currentViewportData;
- if (vpd == null) {
- // if the current viewport data is null, we're waiting on an initial snapshot to arrive for a different
- // part of the viewport
- JsLog.debug("Received delta while waiting for reinitialization");
- return;
- }
- MergeResults mergeResults = vpd.merge(updates);
- if (mergeResults.added.size() == 0 && mergeResults.modified.size() == 0
- && mergeResults.removed.size() == 0) {
- return;
- }
- CustomEventInit event = CustomEventInit.create();
- event.setDetail(vpd);
- // user might call setViewport, and wind up nulling our currentViewportData
- fireEvent(EVENT_UPDATED, event);
-
- // fire rowadded/rowupdated/rowremoved
- // TODO when we keep more rows loaded than the user is aware of, check if a given row is actually in the
- // viewport
- // here
- for (Integer index : mergeResults.added) {
- CustomEventInit addedEvent = CustomEventInit.create();
- addedEvent.setDetail(wrap(vpd.getRows().getAt(index), index));
- fireEvent(EVENT_ROWADDED, addedEvent);
- }
- for (Integer index : mergeResults.modified) {
- CustomEventInit addedEvent = CustomEventInit.create();
- addedEvent.setDetail(wrap(vpd.getRows().getAt(index), index));
- fireEvent(EVENT_ROWUPDATED, addedEvent);
- }
- for (Integer index : mergeResults.removed) {
- CustomEventInit addedEvent = CustomEventInit.create();
- addedEvent.setDetail(wrap(vpd.getRows().getAt(index), index));
- fireEvent(EVENT_ROWREMOVED, addedEvent);
- }
- }, JsRunnable.doNothing());
- }
-
@Override
public TableTicket getHandle() {
return state().getHandle();
@@ -1875,62 +1573,6 @@ public WorkerConnection getConnection() {
return workerConnection;
}
- public void refreshViewport(ClientTableState state, Viewport vp) {
- assert state() == state : "Called refreshViewport with wrong state (" + state + " instead of " + state() + ")";
- assert state.getResolution() == ClientTableState.ResolutionState.RUNNING
- : "Do not call refreshViewport for a state that is not running! (" + state + ")";
-
- currentViewportData = null; // ignore any deltas for past viewports
- workerConnection.scheduleCheck(state);
- // now that we've made sure the server knows, if we already know that the viewport is beyond what exists, we
- // can go ahead and fire an update event. We're in the onResolved call, so we know the handle has resolved
- // and if size is not -1, then we've already at least gotten the initial snapshot (otherwise, that snapshot
- // will be here soon, and will fire its own event)
- if (state.getSize() != ClientTableState.SIZE_UNINITIALIZED && state.getSize() <= vp.getRows().getFirstRow()) {
- JsLog.debug("Preparing to send a 'fake' update event since " + state.getSize() + "<="
- + vp.getRows().getFirstRow(), state);
- LazyPromise.runLater(() -> {
- if (state != state()) {
- return;
- }
-
- // get the column expected to be in the snapshot
- JsArray columns = Js.uncheckedCast(getBinding().getColumns());
- Column[] allColumns = state.getColumns();
- if (columns == null) {
- columns = Js.uncheckedCast(allColumns);
- }
- // build an array of empty column data for this snapshot
- Object[] dataColumns = new Object[allColumns.length];
-
- for (int i = 0; i < columns.length; i++) {
- Column c = columns.getAt(i);
- dataColumns[c.getIndex()] = JsData.newArray(c.getType());
- if (c.getFormatStringColumnIndex() != null) {
- dataColumns[c.getFormatStringColumnIndex()] = JsData.newArray("java.lang.String");
- }
- if (c.getStyleColumnIndex() != null) {
- dataColumns[c.getStyleColumnIndex()] = JsData.newArray("long");
- }
- }
- if (currentState.getRowFormatColumn() != null) {
- dataColumns[currentState.getRowFormatColumn().getIndex()] = JsData.newArray("long");
- }
-
- ViewportData data = new ViewportData(RangeSet.empty(), dataColumns, columns,
- currentState.getRowFormatColumn() == null ? NO_ROW_FORMAT_COLUMN
- : currentState.getRowFormatColumn().getIndex(),
- 0);
- this.currentViewportData = data;
- CustomEventInit updatedEvent = CustomEventInit.create();
- updatedEvent.setDetail(data);
- JsLog.debug("Sending 'fake' update event since " + state.getSize() + "<=" + vp.getRows().getFirstRow(),
- vp, state);
- fireEvent(EVENT_UPDATED, updatedEvent);
- });
- }
- }
-
public boolean isActive(ClientTableState state) {
return currentState == state;
}
@@ -2110,7 +1752,10 @@ public int getSubscriptionId() {
@Override
public void maybeReviveSubscription() {
- getBinding().maybeReviveSubscription();
+ TableViewportSubscription viewportSubscription = subscriptions.get(getHandle());
+ if (viewportSubscription != null) {
+ viewportSubscription.revive();
+ }
}
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsTotalsTable.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsTotalsTable.java
index 652bfd4f55f..0bab10ea1d5 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsTotalsTable.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsTotalsTable.java
@@ -5,14 +5,16 @@
import com.vertispan.tsdefs.annotations.TsInterface;
import com.vertispan.tsdefs.annotations.TsName;
+import com.vertispan.tsdefs.annotations.TsTypeRef;
import elemental2.core.JsArray;
import elemental2.core.JsString;
import elemental2.dom.CustomEvent;
-import elemental2.dom.Event;
import elemental2.promise.Promise;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.TypedTicket;
import io.deephaven.web.client.api.console.JsVariableType;
import io.deephaven.web.client.api.filter.FilterCondition;
+import io.deephaven.web.client.api.subscription.AbstractTableSubscription;
+import io.deephaven.web.client.api.subscription.ViewportData;
import io.deephaven.web.client.state.ClientTableState;
import io.deephaven.web.shared.fu.RemoverFn;
import jsinterop.annotations.JsIgnore;
@@ -66,7 +68,7 @@ public JsTotalsTable(JsTable wrappedTable, String directive, JsArray gro
public void refreshViewport() {
if (firstRow != null && lastRow != null) {
- setViewport(firstRow, lastRow, Js.uncheckedCast(columns), updateIntervalMs);
+ setViewport(firstRow, lastRow, Js.uncheckedCast(columns), updateIntervalMs, null);
}
}
@@ -109,12 +111,12 @@ public JsTotalsTableConfig getTotalsTableConfig() {
*/
@JsMethod
public void setViewport(double firstRow, double lastRow, @JsOptional JsArray columns,
- @JsOptional Double updateIntervalMs) {
+ @JsOptional Double updateIntervalMs, @JsOptional @JsNullable Boolean isReverseViewport) {
this.firstRow = firstRow;
this.lastRow = lastRow;
this.columns = columns != null ? Js.uncheckedCast(columns.slice()) : null;
this.updateIntervalMs = updateIntervalMs;
- wrappedTable.setViewport(firstRow, lastRow, columns, updateIntervalMs);
+ wrappedTable.setViewport(firstRow, lastRow, columns, updateIntervalMs, isReverseViewport);
}
/**
@@ -124,7 +126,7 @@ public void setViewport(double firstRow, double lastRow, @JsOptional JsArray
-    public Promise getViewportData() {
+ public Promise getViewportData() {
return wrappedTable.getViewportData();
}
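
A minimal, hypothetical caller sketch (not code from this patch) showing the new optional reverse-viewport flag on JsTotalsTable.setViewport; totalsTable is assumed to be an existing JsTotalsTable, and passing null keeps the previous behavior:

    totalsTable.setViewport(0, 99, null, null, null); // identical to the pre-change call
    totalsTable.setViewport(0, 99, null, null, true); // request a reverse viewport
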
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java b/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java
index ec679c0442a..ea2647184b0 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java
@@ -3,6 +3,7 @@
//
package io.deephaven.web.client.api;
+import io.deephaven.util.QueryConstants;
import jsinterop.annotations.JsIgnore;
import jsinterop.annotations.JsType;
@@ -12,6 +13,9 @@ public class LongWrapper {
@JsIgnore
public static LongWrapper of(long value) {
+ if (value == QueryConstants.NULL_LONG) {
+ return null;
+ }
return new LongWrapper(value);
}
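
A hedged illustration of the change above: LongWrapper.of now maps the Deephaven long null sentinel to a JS null rather than wrapping it, so callers should expect null for null cells (variable names below are illustrative only):

    LongWrapper present = LongWrapper.of(42L);                     // ordinary value, non-null wrapper
    LongWrapper absent = LongWrapper.of(QueryConstants.NULL_LONG); // now returns null
    if (absent == null) {
        // treat as missing data instead of exposing the sentinel value
    }
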
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/TableData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/TableData.java
index 5018db59ab7..19df98c4221 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/TableData.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/TableData.java
@@ -3,11 +3,11 @@
//
package io.deephaven.web.client.api;
-import com.vertispan.tsdefs.annotations.TsName;
import com.vertispan.tsdefs.annotations.TsTypeRef;
import com.vertispan.tsdefs.annotations.TsUnion;
import com.vertispan.tsdefs.annotations.TsUnionMember;
import elemental2.core.JsArray;
+import jsinterop.annotations.JsIgnore;
import jsinterop.annotations.JsMethod;
import jsinterop.annotations.JsOverlay;
import jsinterop.annotations.JsPackage;
@@ -17,13 +17,25 @@
import jsinterop.base.Js;
/**
- * Common interface for various ways of accessing table data and formatting.
- *
+ * Common interface for various ways of accessing table data and formatting for viewport or non-viewport subscriptions
+ * on tables, data in trees, and snapshots.
+ *
+ * Generally speaking, it is more efficient to access data in column-major order, rather than iterating through each Row
+ * and accessing all columns that it holds. The {@link #getRows()} accessor can be useful to read row data, but may
+ * incur other costs - it is likely faster to access data by columns using {@link #getData(RowPositionUnion, Column)}.
+ */
+/*
* Java note: this interface contains some extra overloads that aren't available in JS. Implementations are expected to
* implement only abstract methods, and default methods present in this interface will dispatch accordingly.
*/
-@TsName(namespace = "dh")
+@JsType(namespace = "dh")
public interface TableData {
+ @JsIgnore
+ int NO_ROW_FORMAT_COLUMN = -1;
+
+ /**
+     * TS type union to allow either a number or a LongWrapper to be passed as a row position/key argument for various methods.
+ */
@TsUnion
@JsType(name = "?", namespace = JsPackage.GLOBAL, isNative = true)
interface RowPositionUnion {
@@ -53,9 +65,18 @@ default int asInt() {
@JsProperty
JsArray getColumns();
+ /**
+ * A lazily computed array of all rows available on the client.
+ */
@JsProperty
JsArray<@TsTypeRef(Row.class) ? extends Row> getRows();
+ /**
+ * Reads a row object from the table, from which any subscribed column can be read.
+ *
+ * @param index the position or key to access
+ * @return the row at the given location
+ */
@JsMethod
default Row get(RowPositionUnion index) {
if (index.isLongWrapper()) {
@@ -64,10 +85,20 @@ default Row get(RowPositionUnion index) {
return get(Js.coerceToInt(index));
}
+ @JsIgnore
Row get(long index);
+ @JsIgnore
Row get(int index);
+ /**
+ * Reads a specific cell from the table, by row key and column.
+ *
+ * @param index the row in the table to get data from
+ * @param column the column to read
+ * @return the value in the table
+ */
+ // TODO (deephaven-core#5927) Consider a get/fillChunk API as an efficient alternative
@JsMethod
default Any getData(RowPositionUnion index, Column column) {
if (index.isLongWrapper()) {
@@ -76,10 +107,19 @@ default Any getData(RowPositionUnion index, Column column) {
return getData(index.asInt(), column);
}
+ @JsIgnore
Any getData(int index, Column column);
+ @JsIgnore
Any getData(long index, Column column);
+ /**
+ * The server-specified Format to use for the cell at the given position.
+ *
+ * @param index the row to read
+ * @param column the column to read
+ * @return a Format instance with any server-specified details
+ */
@JsMethod
default Format getFormat(RowPositionUnion index, Column column) {
if (index.isLongWrapper()) {
@@ -88,12 +128,19 @@ default Format getFormat(RowPositionUnion index, Column column) {
return getFormat(index.asInt(), column);
}
+ @JsIgnore
Format getFormat(int index, Column column);
+ @JsIgnore
Format getFormat(long index, Column column);
- @TsName(namespace = "dh")
- public interface Row {
+ /**
+     * Represents a row available in a subscription/snapshot on the client. Do not retain references to rows - they are
+     * only guaranteed to be valid while the event that delivered them is dispatching (or the promise is resolving).
+     * Instead, wait for the next event, or re-request the viewport data.
+ */
+ @JsType(namespace = "dh")
+ interface Row {
@JsProperty
LongWrapper getIndex();
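
To make the column-major guidance in the new TableData javadoc concrete, a rough sketch follows; it assumes data is a TableData delivered by an "updated" event and columns is the JsArray of subscribed Columns (the code below is not part of the patch):

    for (int c = 0; c < columns.length; c++) {
        Column column = columns.getAt(c);
        int rowCount = data.getRows().length;
        for (int row = 0; row < rowCount; row++) {
            Any value = data.getData(row, column);       // Java-only int overload shown above
            Format format = data.getFormat(row, column); // server-specified formatting for the cell
            // consume value/format here without retaining Row references
        }
    }
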
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java b/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java
index ab88008a3de..239dd2efbd0 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java
@@ -3,6 +3,7 @@
//
package io.deephaven.web.client.api;
+import com.google.flatbuffers.FlatBufferBuilder;
import com.vertispan.tsdefs.annotations.TsIgnore;
import elemental2.core.JsArray;
import elemental2.core.JsObject;
@@ -12,33 +13,17 @@
import elemental2.dom.CustomEventInit;
import elemental2.dom.DomGlobal;
import elemental2.promise.Promise;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.FieldNode;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.Message;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.MessageHeader;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.RecordBatch;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Buffer;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Field;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.KeyValue;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.MetadataVersion;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Schema;
import io.deephaven.javascript.proto.dhinternal.arrow.flight.protocol.browserflight_pb_service.BrowserFlightServiceClient;
import io.deephaven.javascript.proto.dhinternal.arrow.flight.protocol.flight_pb.FlightData;
import io.deephaven.javascript.proto.dhinternal.arrow.flight.protocol.flight_pb_service.FlightServiceClient;
import io.deephaven.javascript.proto.dhinternal.browserheaders.BrowserHeaders;
-import io.deephaven.javascript.proto.dhinternal.flatbuffers.Builder;
-import io.deephaven.javascript.proto.dhinternal.flatbuffers.Long;
import io.deephaven.javascript.proto.dhinternal.grpcweb.grpc.Code;
import io.deephaven.javascript.proto.dhinternal.grpcweb.grpc.UnaryOutput;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageMessageType;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageMessageWrapper;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageSubscriptionOptions;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageSubscriptionRequest;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageUpdateMetadata;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.ColumnConversionMode;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.application_pb.FieldInfo;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.application_pb.FieldsChangeUpdate;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.application_pb.ListFieldsRequest;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.application_pb_service.ApplicationServiceClient;
+import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.config_pb.ConfigValue;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.config_pb.ConfigurationConstantsRequest;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.config_pb.ConfigurationConstantsResponse;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.config_pb_service.ConfigService;
@@ -71,7 +56,6 @@
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.Ticket;
import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.TypedTicket;
import io.deephaven.web.client.api.barrage.WebBarrageUtils;
-import io.deephaven.web.client.api.barrage.def.ColumnDefinition;
import io.deephaven.web.client.api.barrage.def.InitialTableDefinition;
import io.deephaven.web.client.api.barrage.stream.BiDiStream;
import io.deephaven.web.client.api.barrage.stream.ResponseStreamWrapper;
@@ -96,22 +80,25 @@
import io.deephaven.web.client.state.ClientTableState;
import io.deephaven.web.client.state.HasTableBinding;
import io.deephaven.web.client.state.TableReviver;
-import io.deephaven.web.shared.data.DeltaUpdates;
-import io.deephaven.web.shared.data.RangeSet;
-import io.deephaven.web.shared.data.TableSnapshot;
-import io.deephaven.web.shared.data.TableSubscriptionRequest;
import io.deephaven.web.shared.fu.JsConsumer;
import io.deephaven.web.shared.fu.JsRunnable;
import jsinterop.annotations.JsMethod;
import jsinterop.annotations.JsOptional;
import jsinterop.base.Js;
import jsinterop.base.JsPropertyMap;
+import org.apache.arrow.flatbuf.Buffer;
+import org.apache.arrow.flatbuf.Field;
+import org.apache.arrow.flatbuf.FieldNode;
+import org.apache.arrow.flatbuf.KeyValue;
+import org.apache.arrow.flatbuf.Message;
+import org.apache.arrow.flatbuf.MessageHeader;
+import org.apache.arrow.flatbuf.MetadataVersion;
+import org.apache.arrow.flatbuf.RecordBatch;
+import org.apache.arrow.flatbuf.Schema;
import javax.annotation.Nullable;
-import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.BitSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -123,12 +110,6 @@
import java.util.stream.Collectors;
import static io.deephaven.web.client.api.CoreClient.EVENT_REFRESH_TOKEN_UPDATED;
-import static io.deephaven.web.client.api.barrage.WebBarrageUtils.DeltaUpdatesBuilder;
-import static io.deephaven.web.client.api.barrage.WebBarrageUtils.createSnapshot;
-import static io.deephaven.web.client.api.barrage.WebBarrageUtils.deltaUpdates;
-import static io.deephaven.web.client.api.barrage.WebBarrageUtils.makeUint8ArrayFromBitset;
-import static io.deephaven.web.client.api.barrage.WebBarrageUtils.serializeRanges;
-import static io.deephaven.web.client.api.barrage.WebBarrageUtils.typedArrayToLittleEndianByteBuffer;
import static io.deephaven.web.client.api.barrage.WebGrpcUtils.CLIENT_OPTIONS;
/**
@@ -205,7 +186,6 @@ private enum State {
private final Set flushable = new HashSet<>();
private final JsSet> logCallbacks = new JsSet<>();
- private final Map> subscriptionStreams = new HashMap<>();
private ResponseStreamWrapper exportNotifications;
private JsSet simpleReconnectableInstances = new JsSet<>();
@@ -220,6 +200,8 @@ private enum State {
private Map knownFields = new HashMap<>();
private ResponseStreamWrapper fieldsChangeUpdateStream;
+ private ConfigurationConstantsResponse constants;
+
public WorkerConnection(QueryConnectable> info) {
this.info = info;
this.config = new ClientConfiguration();
@@ -306,7 +288,6 @@ private void connectToWorker() {
ClientTableState[] hasActiveSubs = cache.getAllStates().stream()
.peek(cts -> {
cts.getHandle().setConnected(false);
- cts.setSubscribed(false);
cts.forActiveLifecycles(item -> {
assert !(item instanceof JsTable) ||
((JsTable) item).state() == cts
@@ -501,11 +482,11 @@ this, ConfigService.GetConfigurationConstants, new ConfigurationConstantsRequest
}
// Read the timeout from the server, we'll refresh at less than that
- result.getMessage().getConfigValuesMap().forEach((item, key) -> {
- if (key.equals("http.session.durationMs")) {
- sessionTimeoutMs = Double.parseDouble(item.getStringValue());
- }
- });
+ constants = result.getMessage();
+ ConfigValue sessionDuration = constants.getConfigValuesMap().get("http.session.durationMs");
+ if (sessionDuration != null && sessionDuration.hasStringValue()) {
+ sessionTimeoutMs = Double.parseDouble(sessionDuration.getStringValue());
+ }
// schedule an update based on our currently configured delay
scheduledAuthUpdate = DomGlobal.setTimeout(ignore -> {
@@ -563,52 +544,10 @@ private void subscribeToTerminationNotification() {
});
}
- // @Override
- public void initialSnapshot(TableTicket handle, TableSnapshot snapshot) {
- LazyPromise.runLater(() -> {
- // notify table that it has a snapshot available to replace viewport rows
- // TODO looping in this way is not ideal, means that we're roughly O(n*m), where
- // n is the number of rows, and m the number of tables with viewports.
- // Instead, we should track all rows here in WorkerConnection, and then
- // tell every table who might be interested about the rows it is interested in.
- if (!cache.get(handle).isPresent()) {
- JsLog.debug("Discarding snapshot for ", handle, " : ", snapshot);
- }
- cache.get(handle).ifPresent(s -> {
- s.setSize(snapshot.getTableSize());
- s.forActiveTables(table -> {
- table.handleSnapshot(handle, snapshot);
- });
- });
- });
- }
-
- // @Override
- public void incrementalUpdates(TableTicket tableHandle, DeltaUpdates updates) {
- LazyPromise.runLater(() -> {
- // notify table that it has individual row updates
- final Optional cts = cache.get(tableHandle);
- if (!cts.isPresent()) {
- JsLog.debug("Discarding delta for disconnected state ", tableHandle, " : ", updates);
- }
- JsLog.debug("Delta received", tableHandle, updates);
- cts.ifPresent(s -> {
- if (!s.isSubscribed()) {
- JsLog.debug("Discarding delta for unsubscribed table", tableHandle, updates);
- return;
- }
- s.handleDelta(updates);
- });
- });
- }
-
// @Override
public void exportedTableUpdateMessage(TableTicket clientId, long size) {
cache.get(clientId).ifPresent(state -> {
- if (!state.isSubscribed()) {
- // not presently subscribed so this is the only way to be informed of size changes
- state.setSize(size);
- }
+ state.setSize(size);
});
}
@@ -1080,12 +1019,12 @@ public Promise newTable(String[] columnNames, String[] types, Object[][
dataRef[0] = null;
// make a schema that we can embed in the first DoPut message
- Builder schema = new Builder(1024);
+ FlatBufferBuilder schema = new FlatBufferBuilder(1024);
// while we're examining columns, build the copiers for data
List columns = new ArrayList<>();
- double[] fields = new double[columnNames.length];
+ int[] fields = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
String columnName = columnNames[i];
String columnType = types[i];
@@ -1093,9 +1032,9 @@ public Promise newTable(String[] columnNames, String[] types, Object[][
JsDataHandler writer = JsDataHandler.getHandler(columnType);
columns.add(writer);
- double nameOffset = schema.createString(columnName);
- double typeOffset = writer.writeType(schema);
- double metadataOffset = Field.createCustomMetadataVector(schema, new double[] {
+ int nameOffset = schema.createString(columnName);
+ int typeOffset = writer.writeType(schema);
+ int metadataOffset = Field.createCustomMetadataVector(schema, new int[] {
KeyValue.createKeyValue(schema, schema.createString("deephaven:type"),
schema.createString(writer.deephavenType()))
});
@@ -1110,7 +1049,7 @@ public Promise newTable(String[] columnNames, String[] types, Object[][
fields[i] = Field.endField(schema);
}
- double fieldsOffset = Schema.createFieldsVector(schema, fields);
+ int fieldsOffset = Schema.createFieldsVector(schema, fields);
Schema.startSchema(schema);
Schema.addFields(schema, fieldsOffset);
@@ -1152,7 +1091,7 @@ public Promise newTable(String[] columnNames, String[] types, Object[][
FlightData bodyMessage = new FlightData();
bodyMessage.setAppMetadata(WebBarrageUtils.emptyMessage());
- Builder bodyData = new Builder(1024);
+ FlatBufferBuilder bodyData = new FlatBufferBuilder(1024);
// iterate each column, building buffers and fieldnodes, as well as building the actual payload
List buffers = new ArrayList<>();
@@ -1176,25 +1115,25 @@ public Promise newTable(String[] columnNames, String[] types, Object[][
for (int i = buffers.size() - 1; i >= 0; i--) {
Uint8Array buffer = buffers.get(i);
cumulativeOffset -= buffer.byteLength;
- Buffer.createBuffer(bodyData, Long.create(cumulativeOffset, 0), Long.create(buffer.byteLength, 0));
+ Buffer.createBuffer(bodyData, cumulativeOffset, buffer.byteLength);
}
assert cumulativeOffset == 0;
- double buffersOffset = bodyData.endVector();
+ int buffersOffset = bodyData.endVector();
RecordBatch.startNodesVector(bodyData, nodes.size());
for (int i = nodes.size() - 1; i >= 0; i--) {
JsDataHandler.Node node = nodes.get(i);
- FieldNode.createFieldNode(bodyData, Long.create(node.length(), 0), Long.create(node.nullCount(), 0));
+ FieldNode.createFieldNode(bodyData, node.length(), node.nullCount());
}
- double nodesOffset = bodyData.endVector();
+ int nodesOffset = bodyData.endVector();
RecordBatch.startRecordBatch(bodyData);
RecordBatch.addBuffers(bodyData, buffersOffset);
RecordBatch.addNodes(bodyData, nodesOffset);
- RecordBatch.addLength(bodyData, Long.create(data[0].length, 0));
+ RecordBatch.addLength(bodyData, data[0].length);
- double recordBatchOffset = RecordBatch.endRecordBatch(bodyData);
+ int recordBatchOffset = RecordBatch.endRecordBatch(bodyData);
bodyMessage.setDataHeader(createMessage(bodyData, MessageHeader.RecordBatch, recordBatchOffset, length, 0));
bodyMessage.setDataBody(padAndConcat(buffers, length));
@@ -1215,11 +1154,11 @@ private Uint8Array padAndConcat(List buffers, int length) {
return all;
}
- private static Uint8Array createMessage(Builder payload, int messageHeaderType, double messageHeaderOffset,
- int bodyLength, double customMetadataOffset) {
+ private static Uint8Array createMessage(FlatBufferBuilder payload, byte messageHeaderType, int messageHeaderOffset,
+ int bodyLength, int customMetadataOffset) {
payload.finish(Message.createMessage(payload, MetadataVersion.V5, messageHeaderType, messageHeaderOffset,
- Long.create(bodyLength, 0), customMetadataOffset));
- return payload.asUint8Array();
+ bodyLength, customMetadataOffset));
+ return WebBarrageUtils.bbToUint8ArrayView(payload.dataBuffer());
}
public Promise mergeTables(JsTable[] tables, HasEventHandling failHandler) {
@@ -1323,7 +1262,7 @@ public StateCache getCache() {
}
/**
- * Schedules a deferred command to check the given state for active tables and adjust viewports accordingly.
+ * Schedules a deferred command to check the given state for active tables.
*/
public void scheduleCheck(ClientTableState state) {
if (flushable.isEmpty()) {
@@ -1348,209 +1287,23 @@ public void releaseTicket(Ticket ticket) {
sessionServiceClient.release(releaseRequest, metadata, null);
}
-
- /**
- * For those calls where we don't really care what happens
- */
- private static final Callback DONOTHING_CALLBACK = new Callback() {
- @Override
- public void onSuccess(Void value) {
- // Do nothing.
- }
-
- @Override
- public void onFailure(String error) {
- JsLog.error("Callback failed: " + error);
- }
- };
-
private void flush() {
- // LATER: instead of running a bunch of serial operations,
- // condense these all into a single batch operation.
- // All three server calls made by this method are _only_ called by this method,
- // so we can reasonably merge all three into a single batched operation.
ArrayList statesToFlush = new ArrayList<>(flushable);
flushable.clear();
-
for (ClientTableState state : statesToFlush) {
- if (state.hasNoSubscriptions()) {
- // state may be retained if it is held by at least one paused binding;
- // it is either an unsubscribed active table, an interim state for an
- // active table, or a pending rollback for an operation that has not
- // yet completed (we leave orphaned nodes paused until a request completes).
- if (state.isSubscribed()) {
- state.setSubscribed(false);
+ if (state.isEmpty()) {
+ // completely empty; perform release
+ final ClientTableState.ResolutionState previousState = state.getResolution();
+ state.setResolution(ClientTableState.ResolutionState.RELEASED);
+ if (previousState != ClientTableState.ResolutionState.RELEASED) {
+ cache.release(state);
+
+ JsLog.debug("Releasing state", state, LazyString.of(state.getHandle()));
+ // don't send a release message to the server if the table isn't really there
if (state.getHandle().isConnected()) {
- BiDiStream stream = subscriptionStreams.remove(state);
- if (stream != null) {
- stream.end();
- stream.cancel();
- }
- }
- }
-
- if (state.isEmpty()) {
- // completely empty; perform release
- final ClientTableState.ResolutionState previousState = state.getResolution();
- state.setResolution(ClientTableState.ResolutionState.RELEASED);
- state.setSubscribed(false);
- if (previousState != ClientTableState.ResolutionState.RELEASED) {
- cache.release(state);
-
- JsLog.debug("Releasing state", state, LazyString.of(state.getHandle()));
- // don't send a release message to the server if the table isn't really there
- if (state.getHandle().isConnected()) {
- releaseHandle(state.getHandle());
- }
- }
- }
- } else if (state.isRunning()) {
- List vps = new ArrayList<>();
- state.forActiveSubscriptions((table, subscription) -> {
- assert table.isActive(state) : "Inactive table has a viewport still attached";
- vps.add(new TableSubscriptionRequest(table.getSubscriptionId(), subscription.getRows(),
- subscription.getColumns()));
- });
-
- boolean isViewport = vps.stream().allMatch(req -> req.getRows() != null);
- assert isViewport || vps.stream().noneMatch(req -> req.getRows() != null)
- : "All subscriptions to a given handle must be consistently viewport or non-viewport";
-
-
- BitSet includedColumns = vps.stream().map(TableSubscriptionRequest::getColumns).reduce((bs1, bs2) -> {
- BitSet result = new BitSet();
- result.or(bs1);
- result.or(bs2);
- return result;
- }).orElseThrow(() -> new IllegalStateException("Cannot call subscribe with zero subscriptions"));
- String[] columnTypes = Arrays.stream(state.getTableDef().getColumns())
- .map(ColumnDefinition::getType)
- .toArray(String[]::new);
-
- state.setSubscribed(true);
-
- Builder subscriptionReq = new Builder(1024);
-
- double columnsOffset = BarrageSubscriptionRequest.createColumnsVector(subscriptionReq,
- makeUint8ArrayFromBitset(includedColumns));
- double viewportOffset = 0;
- if (isViewport) {
- viewportOffset = BarrageSubscriptionRequest.createViewportVector(subscriptionReq, serializeRanges(
- vps.stream().map(TableSubscriptionRequest::getRows).collect(Collectors.toSet())));
- }
- // TODO #188 support minUpdateIntervalMs
- double serializationOptionsOffset = BarrageSubscriptionOptions
- .createBarrageSubscriptionOptions(subscriptionReq, ColumnConversionMode.Stringify, true, 1000,
- 0, 0);
- double tableTicketOffset =
- BarrageSubscriptionRequest.createTicketVector(subscriptionReq, state.getHandle().getTicket());
- BarrageSubscriptionRequest.startBarrageSubscriptionRequest(subscriptionReq);
- BarrageSubscriptionRequest.addColumns(subscriptionReq, columnsOffset);
- BarrageSubscriptionRequest.addSubscriptionOptions(subscriptionReq, serializationOptionsOffset);
- BarrageSubscriptionRequest.addViewport(subscriptionReq, viewportOffset);
- BarrageSubscriptionRequest.addTicket(subscriptionReq, tableTicketOffset);
- subscriptionReq.finish(BarrageSubscriptionRequest.endBarrageSubscriptionRequest(subscriptionReq));
-
- FlightData request = new FlightData();
- request.setAppMetadata(
- WebBarrageUtils.wrapMessage(subscriptionReq, BarrageMessageType.BarrageSubscriptionRequest));
-
- BiDiStream stream = this.streamFactory().create(
- headers -> flightServiceClient.doExchange(headers),
- (first, headers) -> browserFlightServiceClient.openDoExchange(first, headers),
- (next, headers, c) -> browserFlightServiceClient.nextDoExchange(next, headers, c::apply),
- new FlightData());
-
- stream.send(request);
- stream.onData(new JsConsumer() {
- @Override
- public void apply(FlightData data) {
- ByteBuffer body = typedArrayToLittleEndianByteBuffer(data.getDataBody_asU8());
- Message headerMessage = Message
- .getRootAsMessage(new io.deephaven.javascript.proto.dhinternal.flatbuffers.ByteBuffer(
- data.getDataHeader_asU8()));
- if (body.limit() == 0 && headerMessage.headerType() != MessageHeader.RecordBatch) {
- // a subscription stream presently ignores schemas and other message types
- // TODO hang on to the schema to better handle the now-Utf8 columns
- return;
- }
- RecordBatch header = headerMessage.header(new RecordBatch());
- BarrageMessageWrapper barrageMessageWrapper =
- BarrageMessageWrapper.getRootAsBarrageMessageWrapper(
- new io.deephaven.javascript.proto.dhinternal.flatbuffers.ByteBuffer(
- data.getAppMetadata_asU8()));
- if (barrageMessageWrapper.msgType() == BarrageMessageType.None) {
- // continue previous message, just read RecordBatch
- appendAndMaybeFlush(header, body);
- } else {
- assert barrageMessageWrapper.msgType() == BarrageMessageType.BarrageUpdateMetadata;
- BarrageUpdateMetadata barrageUpdate = BarrageUpdateMetadata.getRootAsBarrageUpdateMetadata(
- new io.deephaven.javascript.proto.dhinternal.flatbuffers.ByteBuffer(
- new Uint8Array(barrageMessageWrapper.msgPayloadArray())));
- startAndMaybeFlush(barrageUpdate.isSnapshot(), header, body, barrageUpdate, isViewport,
- columnTypes);
- }
+ releaseHandle(state.getHandle());
}
-
- private DeltaUpdatesBuilder nextDeltaUpdates;
- private DeltaUpdates deferredDeltaUpdates;
-
- private void appendAndMaybeFlush(RecordBatch header, ByteBuffer body) {
- // using existing barrageUpdate, append to the current snapshot/delta
- assert nextDeltaUpdates != null;
- boolean shouldFlush = nextDeltaUpdates.appendRecordBatch(header, body);
- if (shouldFlush) {
- DeltaUpdates updates = nextDeltaUpdates.build();
- nextDeltaUpdates = null;
-
- if (state.getTableDef().getAttributes().isBlinkTable()) {
- // blink tables remove all rows from the previous step, if there are no adds this step
- // then defer removal until new data arrives -- this makes blink tables GUI friendly
- if (updates.getAdded().isEmpty()) {
- if (deferredDeltaUpdates != null) {
- final RangeSet removed = deferredDeltaUpdates.getRemoved();
- updates.getRemoved().rangeIterator().forEachRemaining(removed::addRange);
- } else {
- deferredDeltaUpdates = updates;
- }
- return;
- } else if (deferredDeltaUpdates != null) {
- assert updates.getRemoved().isEmpty()
- : "Blink table received two consecutive remove rowsets";
- updates.setRemoved(deferredDeltaUpdates.getRemoved());
- deferredDeltaUpdates = null;
- }
- }
- incrementalUpdates(state.getHandle(), updates);
- }
- }
-
- private void startAndMaybeFlush(boolean isSnapshot, RecordBatch header, ByteBuffer body,
- BarrageUpdateMetadata barrageUpdate, boolean isViewport, String[] columnTypes) {
- if (isSnapshot) {
- TableSnapshot snapshot =
- createSnapshot(header, body, barrageUpdate, isViewport, columnTypes);
-
- // for now we always expect snapshots to arrive in a single payload
- initialSnapshot(state.getHandle(), snapshot);
- } else {
- nextDeltaUpdates = deltaUpdates(barrageUpdate, isViewport, columnTypes);
- appendAndMaybeFlush(header, body);
- }
- }
- });
- stream.onStatus(err -> {
- checkStatus(err);
- if (!err.isOk() && !err.isTransportError()) {
- state.setResolution(ClientTableState.ResolutionState.FAILED, err.getDetails());
- }
- });
- BiDiStream oldStream = subscriptionStreams.put(state, stream);
- if (oldStream != null) {
- // cancel any old stream, we presently expect a fresh instance
- oldStream.end();
- oldStream.cancel();
}
}
}
@@ -1576,6 +1329,10 @@ public ClientConfiguration getConfig() {
return config;
}
+ public ConfigValue getServerConfigValue(String key) {
+ return constants.getConfigValuesMap().get(key);
+ }
+
public void onOpen(BiConsumer callback) {
switch (state) {
case Connected:
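
A hypothetical consumer of the new getServerConfigValue accessor; the key is the same one the connection itself reads above, and connection is assumed to be an established WorkerConnection:

    ConfigValue duration = connection.getServerConfigValue("http.session.durationMs");
    if (duration != null && duration.hasStringValue()) {
        double sessionTimeoutMs = Double.parseDouble(duration.getStringValue());
        // schedule a token refresh at some fraction of this timeout
    }
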
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/CompressedRangeSetReader.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/CompressedRangeSetReader.java
index e4f14ef2fd4..1ee0c147621 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/CompressedRangeSetReader.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/CompressedRangeSetReader.java
@@ -175,7 +175,7 @@ public RangeSet read(ByteBuffer data) {
if (pending >= 0) {
append(pending);
}
- return RangeSet.fromSortedRanges(sortedRanges.toArray(new Range[0]));
+ return RangeSet.fromSortedRanges(sortedRanges);
default:
throw new IllegalStateException("Bad command: " + command + " at position " + data.position());
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/ShiftedRangeReader.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/ShiftedRangeReader.java
index ac2b7524e6b..3c1c68fc3bb 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/ShiftedRangeReader.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/ShiftedRangeReader.java
@@ -12,7 +12,7 @@
public class ShiftedRangeReader {
- public ShiftedRange[] read(ByteBuffer data) {
+ public static ShiftedRange[] read(ByteBuffer data) {
RangeSet start = new CompressedRangeSetReader().read(data);
RangeSet end = new CompressedRangeSetReader().read(data);
RangeSet postShiftStart = new CompressedRangeSetReader().read(data);
@@ -30,4 +30,29 @@ public ShiftedRange[] read(ByteBuffer data) {
return ranges;
}
+
+ public static ByteBuffer write(ShiftedRange[] shiftedRanges) {
+ RangeSet start = new RangeSet();
+ RangeSet end = new RangeSet();
+ RangeSet postShiftStart = new RangeSet();
+
+ for (int i = 0; i < shiftedRanges.length; i++) {
+ ShiftedRange range = shiftedRanges[i];
+ long first = range.getRange().getFirst();
+ long last = range.getRange().getLast();
+ long delta = range.getDelta() + first;
+ start.addRange(new Range(first, first));
+ end.addRange(new Range(last, last));
+ postShiftStart.addRange(new Range(delta, delta));
+ }
+
+ ByteBuffer startBuf = CompressedRangeSetReader.writeRange(start);
+ ByteBuffer endBuf = CompressedRangeSetReader.writeRange(end);
+ ByteBuffer shiftBuf = CompressedRangeSetReader.writeRange(postShiftStart);
+ ByteBuffer all = ByteBuffer.allocateDirect(startBuf.remaining() + endBuf.remaining() + shiftBuf.remaining());
+ all.put(startBuf);
+ all.put(endBuf);
+ all.put(shiftBuf);
+ return all;
+ }
}
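
A rough round-trip sketch for the new static write() helper (not part of the patch; shifts is assumed to be an existing ShiftedRange[]):

    ByteBuffer encoded = ShiftedRangeReader.write(shifts);
    encoded.flip(); // write() leaves the buffer positioned after the last byte written
    ShiftedRange[] decoded = ShiftedRangeReader.read(encoded);
    assert decoded.length == shifts.length;
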
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageMessage.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageMessage.java
new file mode 100644
index 00000000000..1b26f2ccadb
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageMessage.java
@@ -0,0 +1,49 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage;
+
+import io.deephaven.chunk.Chunk;
+import io.deephaven.chunk.ChunkType;
+import io.deephaven.chunk.attributes.Values;
+import io.deephaven.web.shared.data.RangeSet;
+import io.deephaven.web.shared.data.ShiftedRange;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+
+public class WebBarrageMessage {
+ public static class ModColumnData {
+ public RangeSet rowsModified;
+        public Class<?> type;
+        public Class<?> componentType;
+        public ArrayList<Chunk<Values>> data;
+ public ChunkType chunkType;
+ }
+ public static class AddColumnData {
+        public Class<?> type;
+        public Class<?> componentType;
+        public ArrayList<Chunk<Values>> data;
+ public ChunkType chunkType;
+ }
+
+ public long firstSeq = -1;
+ public long lastSeq = -1;
+ public long step = -1;
+
+ public boolean isSnapshot;
+ public RangeSet snapshotRowSet;
+ public boolean snapshotRowSetIsReversed;
+ public BitSet snapshotColumns;
+
+ public RangeSet rowsAdded;
+ public RangeSet rowsIncluded;
+ public RangeSet rowsRemoved;
+ public ShiftedRange[] shifted;
+
+ public AddColumnData[] addColumnData;
+ public ModColumnData[] modColumnData;
+
+ // Underlying RecordBatch.length, visible for reading snapshots
+ public long length;
+}
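
A hedged sketch of how downstream code might walk the chunked column data carried by one of these messages; message is assumed to be a fully assembled WebBarrageMessage, and the accessors used are the generic Chunk API rather than anything added by this patch:

    for (WebBarrageMessage.AddColumnData columnData : message.addColumnData) {
        for (Chunk<Values> chunk : columnData.data) {
            for (int i = 0; i < chunk.size(); i++) {
                Object value = chunk.asObjectChunk().get(i); // e.g. for object-typed columns
            }
        }
    }
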
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageStreamReader.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageStreamReader.java
new file mode 100644
index 00000000000..6bf5196034e
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageStreamReader.java
@@ -0,0 +1,291 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage;
+
+import com.google.common.io.LittleEndianDataInputStream;
+import io.deephaven.barrage.flatbuf.BarrageMessageType;
+import io.deephaven.barrage.flatbuf.BarrageMessageWrapper;
+import io.deephaven.barrage.flatbuf.BarrageModColumnMetadata;
+import io.deephaven.barrage.flatbuf.BarrageUpdateMetadata;
+import io.deephaven.chunk.ChunkType;
+import io.deephaven.chunk.WritableChunk;
+import io.deephaven.chunk.attributes.Values;
+import io.deephaven.extensions.barrage.chunk.ChunkInputStreamGenerator;
+import io.deephaven.extensions.barrage.chunk.ChunkReader;
+import io.deephaven.extensions.barrage.util.FlatBufferIteratorAdapter;
+import io.deephaven.extensions.barrage.util.StreamReaderOptions;
+import io.deephaven.io.streams.ByteBufferInputStream;
+import io.deephaven.javascript.proto.dhinternal.arrow.flight.protocol.flight_pb.FlightData;
+import io.deephaven.util.datastructures.LongSizedDataStructure;
+import io.deephaven.web.client.fu.JsLog;
+import io.deephaven.web.shared.data.RangeSet;
+import io.deephaven.web.shared.data.ShiftedRange;
+import org.apache.arrow.flatbuf.Field;
+import org.apache.arrow.flatbuf.Message;
+import org.apache.arrow.flatbuf.MessageHeader;
+import org.apache.arrow.flatbuf.RecordBatch;
+import org.apache.arrow.flatbuf.Schema;
+import org.gwtproject.nio.TypedArrayHelper;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.PrimitiveIterator;
+
+/**
+ * Consumes FlightData fields from Flight/Barrage producers and builds browser-compatible WebBarrageMessage payloads
+ * that can be used to maintain table data.
+ */
+public class WebBarrageStreamReader {
+ private static final int MAX_CHUNK_SIZE = Integer.MAX_VALUE - 8;
+
+ // record progress in reading
+ private long numAddRowsRead = 0;
+ private long numAddRowsTotal = 0;
+ private long numModRowsRead = 0;
+ private long numModRowsTotal = 0;
+
+ // hold in-progress messages that aren't finished being built
+ private WebBarrageMessage msg;
+
+ private final WebChunkReaderFactory chunkReaderFactory = new WebChunkReaderFactory();
+    private final List<ChunkReader> readers = new ArrayList<>();
+
+ public WebBarrageMessage parseFrom(
+ final StreamReaderOptions options,
+ ChunkType[] columnChunkTypes,
+            Class<?>[] columnTypes,
+            Class<?>[] componentTypes,
+ FlightData flightData) throws IOException {
+ ByteBuffer headerAsBB = TypedArrayHelper.wrap(flightData.getDataHeader_asU8());
+ Message header = headerAsBB.hasRemaining() ? Message.getRootAsMessage(headerAsBB) : null;
+
+ ByteBuffer msgAsBB = TypedArrayHelper.wrap(flightData.getAppMetadata_asU8());
+ if (msgAsBB.hasRemaining()) {
+ BarrageMessageWrapper wrapper =
+ BarrageMessageWrapper.getRootAsBarrageMessageWrapper(msgAsBB);
+ if (wrapper.magic() != WebBarrageUtils.FLATBUFFER_MAGIC) {
+ JsLog.warn(
+ "WebBarrageStreamReader: skipping app_metadata that does not look like BarrageMessageWrapper");
+ } else if (wrapper.msgType() == BarrageMessageType.BarrageUpdateMetadata) {
+ if (msg != null) {
+ throw new IllegalStateException(
+ "Previous message was not complete; pending " + (numAddRowsTotal - numAddRowsRead)
+ + " add rows and " + (numModRowsTotal - numModRowsRead) + " mod rows");
+ }
+
+ final BarrageUpdateMetadata metadata =
+ BarrageUpdateMetadata.getRootAsBarrageUpdateMetadata(wrapper.msgPayloadAsByteBuffer());
+
+ msg = new WebBarrageMessage();
+
+ msg.isSnapshot = metadata.isSnapshot();
+ msg.snapshotRowSetIsReversed = metadata.effectiveReverseViewport();
+
+ numAddRowsRead = 0;
+ numModRowsRead = 0;
+
+ if (msg.isSnapshot) {
+ final ByteBuffer effectiveViewport = metadata.effectiveViewportAsByteBuffer();
+ if (effectiveViewport != null) {
+ msg.snapshotRowSet = extractIndex(effectiveViewport);
+ }
+ final ByteBuffer effectiveSnapshotColumns = metadata.effectiveColumnSetAsByteBuffer();
+ if (effectiveSnapshotColumns != null) {
+ msg.snapshotColumns = extractBitSet(effectiveSnapshotColumns);
+ }
+ }
+
+ msg.firstSeq = metadata.firstSeq();
+ msg.lastSeq = metadata.lastSeq();
+ msg.rowsAdded = extractIndex(metadata.addedRowsAsByteBuffer());
+ msg.rowsRemoved = extractIndex(metadata.removedRowsAsByteBuffer());
+ msg.shifted = extractIndexShiftData(metadata.shiftDataAsByteBuffer());
+
+ final ByteBuffer rowsIncluded = metadata.addedRowsIncludedAsByteBuffer();
+ msg.rowsIncluded = rowsIncluded != null ? extractIndex(rowsIncluded) : msg.rowsAdded;
+ msg.addColumnData = new WebBarrageMessage.AddColumnData[columnTypes.length];
+ for (int ci = 0; ci < msg.addColumnData.length; ++ci) {
+ msg.addColumnData[ci] = new WebBarrageMessage.AddColumnData();
+ // msg.addColumnData[ci].type = columnTypes[ci];
+ // msg.addColumnData[ci].componentType = componentTypes[ci];
+ msg.addColumnData[ci].data = new ArrayList<>();
+
+ // create an initial chunk of the correct size
+ final int chunkSize = (int) (Math.min(msg.rowsIncluded.size(), MAX_CHUNK_SIZE));
+                    final WritableChunk<Values> chunk = columnChunkTypes[ci].makeWritableChunk(chunkSize);
+ chunk.setSize(0);
+ msg.addColumnData[ci].data.add(chunk);
+ }
+ numAddRowsTotal = msg.rowsIncluded.size();
+
+ // if this message is a snapshot response (vs. subscription) then mod columns may be empty
+ numModRowsTotal = 0;
+ msg.modColumnData = new WebBarrageMessage.ModColumnData[metadata.modColumnNodesLength()];
+ for (int ci = 0; ci < msg.modColumnData.length; ++ci) {
+ msg.modColumnData[ci] = new WebBarrageMessage.ModColumnData();
+ // msg.modColumnData[ci].type = columnTypes[ci];
+ // msg.modColumnData[ci].componentType = componentTypes[ci];
+ msg.modColumnData[ci].data = new ArrayList<>();
+
+ final BarrageModColumnMetadata mcd = metadata.modColumnNodes(ci);
+ msg.modColumnData[ci].rowsModified = extractIndex(mcd.modifiedRowsAsByteBuffer());
+
+ // create an initial chunk of the correct size
+ final int chunkSize = (int) (Math.min(msg.modColumnData[ci].rowsModified.size(),
+ MAX_CHUNK_SIZE));
+                    final WritableChunk<Values> chunk = columnChunkTypes[ci].makeWritableChunk(chunkSize);
+ chunk.setSize(0);
+ msg.modColumnData[ci].data.add(chunk);
+
+ numModRowsTotal = Math.max(numModRowsTotal, msg.modColumnData[ci].rowsModified.size());
+ }
+ }
+ }
+
+ byte headerType = header.headerType();
+ if (headerType == MessageHeader.Schema) {
+ // there is no body and our clients do not want to see schema messages
+ Schema schema = new Schema();
+ header.header(schema);
+ for (int i = 0; i < schema.fieldsLength(); i++) {
+ Field field = schema.fields(i);
+ ChunkReader chunkReader = chunkReaderFactory.getReader(options,
+ ChunkReader.typeInfo(columnChunkTypes[i], columnTypes[i],
+ componentTypes[i], field));
+ readers.add(chunkReader);
+ }
+ return null;
+ }
+ if (headerType != MessageHeader.RecordBatch) {
+ throw new IllegalStateException("Only know how to decode Schema/RecordBatch messages");
+ }
+
+
+        // throw an error when there is no app metadata (snapshots now provide it by default)
+ if (msg == null) {
+ throw new IllegalStateException(
+ "Missing app metadata tag; cannot decode using BarrageStreamReader");
+ }
+
+ final RecordBatch batch = (RecordBatch) header.header(new RecordBatch());
+ msg.length = batch.length();
+ ByteBuffer body = TypedArrayHelper.wrap(flightData.getDataBody_asU8());
+ final LittleEndianDataInputStream ois =
+ new LittleEndianDataInputStream(new ByteBufferInputStream(body));
+        final Iterator<ChunkInputStreamGenerator.FieldNodeInfo> fieldNodeIter =
+ new FlatBufferIteratorAdapter<>(batch.nodesLength(),
+ i -> new ChunkInputStreamGenerator.FieldNodeInfo(batch.nodes(i)));
+
+ final long[] bufferInfo = new long[batch.buffersLength()];
+ for (int bi = 0; bi < batch.buffersLength(); ++bi) {
+ int offset = LongSizedDataStructure.intSize("BufferInfo", batch.buffers(bi).offset());
+ int length = LongSizedDataStructure.intSize("BufferInfo", batch.buffers(bi).length());
+ if (bi < batch.buffersLength() - 1) {
+ final int nextOffset =
+ LongSizedDataStructure.intSize("BufferInfo", batch.buffers(bi + 1).offset());
+ // our parsers handle overhanging buffers
+ length += Math.max(0, nextOffset - offset - length);
+ }
+ bufferInfo[bi] = length;
+ }
+ final PrimitiveIterator.OfLong bufferInfoIter = Arrays.stream(bufferInfo).iterator();
+
+
+ // add and mod rows are never combined in a batch. all added rows must be received before the first
+ // mod rows will be received.
+ if (numAddRowsRead < numAddRowsTotal) {
+ for (int ci = 0; ci < msg.addColumnData.length; ++ci) {
+ final WebBarrageMessage.AddColumnData acd = msg.addColumnData[ci];
+
+ final long remaining = numAddRowsTotal - numAddRowsRead;
+ if (batch.length() > remaining) {
+ throw new IllegalStateException(
+ "Batch length exceeded the expected number of rows from app metadata");
+ }
+
+ // select the current chunk size and read the size
+ int lastChunkIndex = acd.data.size() - 1;
+                WritableChunk<Values> chunk = (WritableChunk<Values>) acd.data.get(lastChunkIndex);
+
+ if (batch.length() > chunk.capacity() - chunk.size()) {
+ // reading the rows from this batch will overflow the existing chunk; create a new one
+ final int chunkSize = (int) (Math.min(remaining, MAX_CHUNK_SIZE));
+ chunk = columnChunkTypes[ci].makeWritableChunk(chunkSize);
+ acd.data.add(chunk);
+
+ chunk.setSize(0);
+ ++lastChunkIndex;
+ }
+
+ // fill the chunk with data and assign back into the array
+ acd.data.set(lastChunkIndex,
+ readers.get(ci).readChunk(fieldNodeIter, bufferInfoIter, ois, chunk, chunk.size(),
+ (int) batch.length()));
+ chunk.setSize(chunk.size() + (int) batch.length());
+ }
+ numAddRowsRead += batch.length();
+ } else {
+ for (int ci = 0; ci < msg.modColumnData.length; ++ci) {
+ final WebBarrageMessage.ModColumnData mcd = msg.modColumnData[ci];
+
+ // another column may be larger than this column
+ long remaining = Math.max(0, mcd.rowsModified.size() - numModRowsRead);
+
+ // need to add the batch row data to the column chunks
+ int lastChunkIndex = mcd.data.size() - 1;
+                WritableChunk<Values> chunk = (WritableChunk<Values>) mcd.data.get(lastChunkIndex);
+
+ final int numRowsToRead = LongSizedDataStructure.intSize("BarrageStreamReader",
+ Math.min(remaining, batch.length()));
+ if (numRowsToRead > chunk.capacity() - chunk.size()) {
+ // reading the rows from this batch will overflow the existing chunk; create a new one
+ final int chunkSize = (int) (Math.min(remaining, MAX_CHUNK_SIZE));
+ chunk = columnChunkTypes[ci].makeWritableChunk(chunkSize);
+ mcd.data.add(chunk);
+
+ chunk.setSize(0);
+ ++lastChunkIndex;
+ }
+
+ // fill the chunk with data and assign back into the array
+ mcd.data.set(lastChunkIndex,
+ readers.get(ci).readChunk(fieldNodeIter, bufferInfoIter, ois, chunk, chunk.size(),
+ numRowsToRead));
+ chunk.setSize(chunk.size() + numRowsToRead);
+ }
+ numModRowsRead += batch.length();
+ }
+
+ if (numAddRowsRead == numAddRowsTotal && numModRowsRead == numModRowsTotal) {
+ final WebBarrageMessage retval = msg;
+ msg = null;
+ return retval;
+ }
+
+ // otherwise, must wait for more data
+ return null;
+ }
+
+ private static RangeSet extractIndex(final ByteBuffer bb) {
+ if (bb == null) {
+ return RangeSet.empty();
+ }
+ return new CompressedRangeSetReader().read(bb);
+ }
+
+ private static BitSet extractBitSet(final ByteBuffer bb) {
+ byte[] array = new byte[bb.remaining()];
+ bb.get(array);
+ return BitSet.valueOf(array);
+ }
+
+ private static ShiftedRange[] extractIndexShiftData(final ByteBuffer bb) {
+ return ShiftedRangeReader.read(bb);
+ }
+}
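
A hedged usage sketch for the reader above; stream, options, chunkTypes, columnTypes, and componentTypes are assumed to exist and are not defined by this patch:

    WebBarrageStreamReader reader = new WebBarrageStreamReader();
    stream.onData(flightData -> {
        try {
            WebBarrageMessage message =
                    reader.parseFrom(options, chunkTypes, columnTypes, componentTypes, flightData);
            if (message != null) {
                // a complete update was assembled: apply rowsAdded/rowsRemoved/shifted and the column chunks
            }
            // null means the schema was consumed or more record batches are still expected
        } catch (IOException e) {
            // surface as a failed subscription/snapshot
        }
    });
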
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageUtils.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageUtils.java
index 8f11e5ebe27..648025723a5 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageUtils.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebBarrageUtils.java
@@ -3,64 +3,58 @@
//
package io.deephaven.web.client.api.barrage;
+import com.google.flatbuffers.FlatBufferBuilder;
import elemental2.core.*;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.FieldNode;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.Message;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.MessageHeader;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.message_generated.org.apache.arrow.flatbuf.RecordBatch;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Buffer;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Field;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.KeyValue;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Schema;
-import io.deephaven.javascript.proto.dhinternal.flatbuffers.Builder;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageMessageType;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageMessageWrapper;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageModColumnMetadata;
-import io.deephaven.javascript.proto.dhinternal.io.deephaven.barrage.flatbuf.barrage_generated.io.deephaven.barrage.flatbuf.BarrageUpdateMetadata;
+import io.deephaven.barrage.flatbuf.BarrageMessageType;
+import io.deephaven.barrage.flatbuf.BarrageMessageWrapper;
import io.deephaven.web.client.api.barrage.def.ColumnDefinition;
import io.deephaven.web.client.api.barrage.def.InitialTableDefinition;
import io.deephaven.web.client.api.barrage.def.TableAttributesDefinition;
import io.deephaven.web.shared.data.*;
-import io.deephaven.web.shared.data.columns.*;
-import jsinterop.base.Js;
+import org.apache.arrow.flatbuf.KeyValue;
+import org.apache.arrow.flatbuf.Message;
+import org.apache.arrow.flatbuf.MessageHeader;
+import org.apache.arrow.flatbuf.Schema;
import org.gwtproject.nio.TypedArrayHelper;
-import java.math.BigDecimal;
-import java.math.BigInteger;
import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.IntBuffer;
-import java.nio.charset.StandardCharsets;
-import java.util.BitSet;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.Map;
import java.util.Set;
-import java.util.function.DoubleFunction;
-import java.util.stream.IntStream;
+import java.util.function.IntFunction;
/**
* Utility to read barrage record batches.
*/
public class WebBarrageUtils {
- private static final int MAGIC = 0x6E687064;
-
- public static Uint8Array wrapMessage(Builder innerBuilder, int messageType) {
- Builder outerBuilder = new Builder(1024);
- // This deprecation is incorrect, tsickle didn't understand that only one overload is deprecated
- // noinspection deprecation
- double messageOffset = BarrageMessageWrapper.createMsgPayloadVector(outerBuilder, innerBuilder.asUint8Array());
- double offset =
- BarrageMessageWrapper.createBarrageMessageWrapper(outerBuilder, MAGIC, messageType, messageOffset);
+ public static final int FLATBUFFER_MAGIC = 0x6E687064;
+
+ public static Uint8Array wrapMessage(FlatBufferBuilder innerBuilder, byte messageType) {
+ FlatBufferBuilder outerBuilder = new FlatBufferBuilder(1024);
+ int messageOffset = BarrageMessageWrapper.createMsgPayloadVector(outerBuilder, innerBuilder.dataBuffer());
+ int offset =
+ BarrageMessageWrapper.createBarrageMessageWrapper(outerBuilder, FLATBUFFER_MAGIC, messageType,
+ messageOffset);
outerBuilder.finish(offset);
- return outerBuilder.asUint8Array();
+ ByteBuffer byteBuffer = outerBuilder.dataBuffer();
+ return bbToUint8ArrayView(byteBuffer);
+ }
+
+ public static Uint8Array bbToUint8ArrayView(ByteBuffer byteBuffer) {
+ ArrayBufferView view = TypedArrayHelper.unwrap(byteBuffer);
+ return new Uint8Array(view.buffer, byteBuffer.position() + view.byteOffset, byteBuffer.remaining());
}
public static Uint8Array emptyMessage() {
- Builder builder = new Builder(1024);
- double offset = BarrageMessageWrapper.createBarrageMessageWrapper(builder, MAGIC, BarrageMessageType.None, 0);
+ FlatBufferBuilder builder = new FlatBufferBuilder(1024);
+ int offset = BarrageMessageWrapper.createBarrageMessageWrapper(builder, FLATBUFFER_MAGIC,
+ BarrageMessageType.None, 0);
builder.finish(offset);
- return builder.asUint8Array();
+ return bbToUint8ArrayView(builder.dataBuffer());
+ }
+
+ public static InitialTableDefinition readTableDefinition(Uint8Array flightSchemaMessage) {
+ return readTableDefinition(readSchemaMessage(flightSchemaMessage));
}
public static InitialTableDefinition readTableDefinition(Schema schema) {
@@ -76,50 +70,10 @@ public static InitialTableDefinition readTableDefinition(Schema schema) {
.setColumns(cols);
}
- public static ColumnDefinition[] readColumnDefinitions(Schema schema) {
+ private static ColumnDefinition[] readColumnDefinitions(Schema schema) {
ColumnDefinition[] cols = new ColumnDefinition[(int) schema.fieldsLength()];
for (int i = 0; i < schema.fieldsLength(); i++) {
- cols[i] = new ColumnDefinition();
- Field f = schema.fields(i);
- Map<String, String> fieldMetadata =
- keyValuePairs("deephaven:", f.customMetadataLength(), f::customMetadata);
- cols[i].setName(f.name().asString());
- cols[i].setColumnIndex(i);
- cols[i].setType(fieldMetadata.get("type"));
- cols[i].setIsSortable("true".equals(fieldMetadata.get("isSortable")));
- cols[i].setStyleColumn("true".equals(fieldMetadata.get("isStyle")));
- cols[i].setFormatColumn("true".equals(fieldMetadata.get("isDateFormat"))
- || "true".equals(fieldMetadata.get("isNumberFormat")));
- cols[i].setForRow("true".equals(fieldMetadata.get("isRowStyle")));
-
- String formatColumnName = fieldMetadata.get("dateFormatColumn");
- if (formatColumnName == null) {
- formatColumnName = fieldMetadata.get("numberFormatColumn");
- }
- cols[i].setFormatColumnName(formatColumnName);
-
- cols[i].setStyleColumnName(fieldMetadata.get("styleColumn"));
-
- if (fieldMetadata.containsKey("inputtable.isKey")) {
- cols[i].setInputTableKeyColumn("true".equals(fieldMetadata.get("inputtable.isKey")));
- }
-
- cols[i].setDescription(fieldMetadata.get("description"));
-
- cols[i].setPartitionColumn("true".equals(fieldMetadata.get("isPartitioning")));
-
- cols[i].setHierarchicalExpandByColumn(
- "true".equals(fieldMetadata.get("hierarchicalTable.isExpandByColumn")));
- cols[i].setHierarchicalRowDepthColumn(
- "true".equals(fieldMetadata.get("hierarchicalTable.isRowDepthColumn")));
- cols[i].setHierarchicalRowExpandedColumn(
- "true".equals(fieldMetadata.get("hierarchicalTable.isRowExpandedColumn")));
- cols[i].setRollupAggregatedNodeColumn(
- "true".equals(fieldMetadata.get("rollupTable.isAggregatedNodeColumn")));
- cols[i].setRollupConstituentNodeColumn(
- "true".equals(fieldMetadata.get("rollupTable.isConstituentNodeColumn")));
- cols[i].setRollupGroupByColumn("true".equals(fieldMetadata.get("rollupTable.isGroupByColumn")));
- cols[i].setRollupAggregationInputColumn(fieldMetadata.get("rollupTable.aggregationInputColumnName"));
+ cols[i] = new ColumnDefinition(i, schema.fields(i));
}
return cols;
}
@@ -129,505 +83,42 @@ public static Schema readSchemaMessage(Uint8Array flightSchemaMessage) {
// - IPC_CONTINUATION_TOKEN (4-byte int of -1)
// - message size (4-byte int)
// - a Message wrapping the schema
- io.deephaven.javascript.proto.dhinternal.flatbuffers.ByteBuffer bb =
- new io.deephaven.javascript.proto.dhinternal.flatbuffers.ByteBuffer(flightSchemaMessage);
- bb.setPosition(bb.position() + 8);
+ ByteBuffer bb = TypedArrayHelper.wrap(flightSchemaMessage);
+ bb.position(bb.position() + 8);
Message headerMessage = Message.getRootAsMessage(bb);
assert headerMessage.headerType() == MessageHeader.Schema;
- return headerMessage.header(new Schema());
+ return (Schema) headerMessage.header(new Schema());
}
public static Map<String, String> keyValuePairs(String filterPrefix, double count,
- DoubleFunction<KeyValue> accessor) {
+ IntFunction<KeyValue> accessor) {
Map<String, String> map = new HashMap<>();
for (int i = 0; i < count; i++) {
KeyValue pair = accessor.apply(i);
- String key = pair.key().asString();
+ String key = pair.key();
if (key.startsWith(filterPrefix)) {
key = key.substring(filterPrefix.length());
- String oldValue = map.put(key, pair.value().asString());
+ String oldValue = map.put(key, pair.value());
assert oldValue == null : key + " had " + oldValue + ", replaced with " + pair.value();
}
}
return map;
}
- /**
- * Iterator wrapper that allows peeking at the next item, if any.
- */
- private static class Iter<T> implements Iterator<T> {
- private final Iterator<T> wrapped;
- private T next;
-
- private Iter(Iterator<T> wrapped) {
- this.wrapped = wrapped;
- }
-
- public T peek() {
- if (next != null) {
- return next;
- }
- return next = next();
- }
-
- @Override
- public boolean hasNext() {
- return next != null || wrapped.hasNext();
- }
-
- @Override
- public T next() {
- if (next == null) {
- return wrapped.next();
- }
- T val = next;
- next = null;
- return val;
- }
- }
-
- public static Uint8Array makeUint8ArrayFromBitset(BitSet bitset) {
- int length = (bitset.previousSetBit(Integer.MAX_VALUE - 1) + 8) / 8;
- Uint8Array array = new Uint8Array(length);
- byte[] bytes = bitset.toByteArray();
- for (int i = 0; i < bytes.length; i++) {
- array.setAt(i, (double) bytes[i]);
- }
-
- return array;
- }
-
- public static Uint8Array serializeRanges(Set<RangeSet> rangeSets) {
+ public static ByteBuffer serializeRanges(Set<RangeSet> rangeSets) {
final RangeSet s;
- if (rangeSets.size() == 0) {
- return new Uint8Array(0);
+ if (rangeSets.isEmpty()) {
+ return ByteBuffer.allocate(0);
} else if (rangeSets.size() == 1) {
s = rangeSets.iterator().next();
} else {
s = new RangeSet();
for (RangeSet rangeSet : rangeSets) {
- rangeSet.rangeIterator().forEachRemaining(s::addRange);
- }
- }
-
- ByteBuffer payload = CompressedRangeSetReader.writeRange(s);
- ArrayBufferView buffer = TypedArrayHelper.unwrap(payload);
- return new Uint8Array(buffer);
- }
-
- public static ByteBuffer typedArrayToLittleEndianByteBuffer(Uint8Array data) {
- ArrayBuffer slicedBuffer = data.slice().buffer;
- ByteBuffer bb = TypedArrayHelper.wrap(slicedBuffer);
- bb.order(ByteOrder.LITTLE_ENDIAN);
- return bb;
- }
-
- public static ByteBuffer typedArrayToLittleEndianByteBuffer(Int8Array data) {
- ArrayBuffer slicedBuffer = data.slice().buffer;
- ByteBuffer bb = TypedArrayHelper.wrap(slicedBuffer);
- bb.order(ByteOrder.LITTLE_ENDIAN);
- return bb;
- }
-
- public static TableSnapshot createSnapshot(RecordBatch header, ByteBuffer body, BarrageUpdateMetadata barrageUpdate,
- boolean isViewport, String[] columnTypes) {
- RangeSet added;
-
- final RangeSet includedAdditions;
- if (barrageUpdate == null) {
- includedAdditions = added = RangeSet.ofRange(0, (long) (header.length().toFloat64() - 1));
- } else {
- added = new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(barrageUpdate.addedRowsArray()));
-
- Int8Array addedRowsIncluded = barrageUpdate.addedRowsIncludedArray();
- if (isViewport && addedRowsIncluded != null) {
- includedAdditions = new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(addedRowsIncluded));
- } else {
- // if this isn't a viewport, then a second index isn't sent, because all rows are included
- includedAdditions = added;
- }
- }
-
- // read the nodes and buffers into iterators so that we can descend into the data columns as necessary
- Iter<FieldNode> nodes =
- new Iter<>(IntStream.range(0, (int) header.nodesLength()).mapToObj(header::nodes).iterator());
- Iter<Buffer> buffers =
- new Iter<>(IntStream.range(0, (int) header.buffersLength()).mapToObj(header::buffers).iterator());
- ColumnData[] columnData = new ColumnData[columnTypes.length];
- for (int columnIndex = 0; columnIndex < columnTypes.length; ++columnIndex) {
- columnData[columnIndex] =
- readArrowBuffer(body, nodes, buffers, (int) includedAdditions.size(), columnTypes[columnIndex]);
- }
-
- return new TableSnapshot(includedAdditions, columnData, added.size());
- }
-
- public static DeltaUpdatesBuilder deltaUpdates(BarrageUpdateMetadata barrageUpdate, boolean isViewport,
- String[] columnTypes) {
- return new DeltaUpdatesBuilder(barrageUpdate, isViewport, columnTypes);
- }
-
- public static class DeltaUpdatesBuilder {
- private final DeltaUpdates deltaUpdates = new DeltaUpdates();
- private final BarrageUpdateMetadata barrageUpdate;
- private final String[] columnTypes;
- private long numAddRowsRemaining = 0;
- private long numModRowsRemaining = 0;
-
- public DeltaUpdatesBuilder(BarrageUpdateMetadata barrageUpdate, boolean isViewport, String[] columnTypes) {
- this.barrageUpdate = barrageUpdate;
- this.columnTypes = columnTypes;
-
- deltaUpdates.setAdded(new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(barrageUpdate.addedRowsArray())));
- deltaUpdates.setRemoved(new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(barrageUpdate.removedRowsArray())));
-
- deltaUpdates.setShiftedRanges(
- new ShiftedRangeReader().read(typedArrayToLittleEndianByteBuffer(barrageUpdate.shiftDataArray())));
-
- RangeSet includedAdditions;
-
- Int8Array addedRowsIncluded = barrageUpdate.addedRowsIncludedArray();
- if (isViewport && addedRowsIncluded != null) {
- includedAdditions = new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(addedRowsIncluded));
- } else {
- // if this isn't a viewport, then a second index isn't sent, because all rows are included
- includedAdditions = deltaUpdates.getAdded();
+ s.addRangeSet(rangeSet);
}
- numAddRowsRemaining = includedAdditions.size();
- deltaUpdates.setIncludedAdditions(includedAdditions);
- deltaUpdates.setSerializedAdditions(new DeltaUpdates.ColumnAdditions[0]);
- deltaUpdates.setSerializedModifications(new DeltaUpdates.ColumnModifications[0]);
-
- for (int columnIndex = 0; columnIndex < columnTypes.length; ++columnIndex) {
- BarrageModColumnMetadata columnMetadata = barrageUpdate.modColumnNodes(columnIndex);
- RangeSet modifiedRows = new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(columnMetadata.modifiedRowsArray()));
- numModRowsRemaining = Math.max(numModRowsRemaining, modifiedRows.size());
- }
- }
-
- /**
- * Appends a new record batch and payload. Returns true if this was the final record batch that was expected.
- */
- public boolean appendRecordBatch(RecordBatch recordBatch, ByteBuffer body) {
- if (numAddRowsRemaining > 0) {
- handleAddBatch(recordBatch, body);
- } else if (numModRowsRemaining > 0) {
- handleModBatch(recordBatch, body);
- }
- // return true when complete
- return numAddRowsRemaining == 0 && numModRowsRemaining == 0;
}
- private void handleAddBatch(RecordBatch recordBatch, ByteBuffer body) {
- Iter<FieldNode> nodes = new Iter<>(
- IntStream.range(0, (int) recordBatch.nodesLength()).mapToObj(recordBatch::nodes).iterator());
- Iter<Buffer> buffers = new Iter<>(
- IntStream.range(0, (int) recordBatch.buffersLength()).mapToObj(recordBatch::buffers).iterator());
-
- DeltaUpdates.ColumnAdditions[] addedColumnData = new DeltaUpdates.ColumnAdditions[columnTypes.length];
- for (int columnIndex = 0; columnIndex < columnTypes.length; ++columnIndex) {
- assert nodes.hasNext() && buffers.hasNext();
- ColumnData columnData = readArrowBuffer(body, nodes, buffers, (int) nodes.peek().length().toFloat64(),
- columnTypes[columnIndex]);
-
- addedColumnData[columnIndex] = new DeltaUpdates.ColumnAdditions(columnIndex, columnData);
- }
- deltaUpdates.setSerializedAdditions(addedColumnData);
- numAddRowsRemaining -= (long) recordBatch.length().toFloat64();
- }
-
- private void handleModBatch(RecordBatch recordBatch, ByteBuffer body) {
- Iter<FieldNode> nodes = new Iter<>(
- IntStream.range(0, (int) recordBatch.nodesLength()).mapToObj(recordBatch::nodes).iterator());
- Iter<Buffer> buffers = new Iter<>(
- IntStream.range(0, (int) recordBatch.buffersLength()).mapToObj(recordBatch::buffers).iterator());
-
- DeltaUpdates.ColumnModifications[] modifiedColumnData =
- new DeltaUpdates.ColumnModifications[columnTypes.length];
- for (int columnIndex = 0; columnIndex < columnTypes.length; ++columnIndex) {
- assert nodes.hasNext() && buffers.hasNext();
-
- BarrageModColumnMetadata columnMetadata = barrageUpdate.modColumnNodes(columnIndex);
- RangeSet modifiedRows = new CompressedRangeSetReader()
- .read(typedArrayToLittleEndianByteBuffer(columnMetadata.modifiedRowsArray()));
-
- ColumnData columnData = readArrowBuffer(body, nodes, buffers, (int) nodes.peek().length().toFloat64(),
- columnTypes[columnIndex]);
- modifiedColumnData[columnIndex] =
- new DeltaUpdates.ColumnModifications(columnIndex, modifiedRows, columnData);
- }
- deltaUpdates.setSerializedModifications(modifiedColumnData);
- numModRowsRemaining -= (long) recordBatch.length().toFloat64();
- }
-
- public DeltaUpdates build() {
- return deltaUpdates;
- }
+ return CompressedRangeSetReader.writeRange(s);
}
-
- private static ColumnData readArrowBuffer(ByteBuffer data, Iter<FieldNode> nodes, Iter<Buffer> buffers, int size,
- String columnType) {
- // explicit cast to be clear that we're rounding down
- BitSet valid = readValidityBufferAsBitset(data, size, buffers.next());
- FieldNode thisNode = nodes.next();
- boolean hasNulls = thisNode.nullCount().toFloat64() != 0;
- size = Math.min(size, (int) thisNode.length().toFloat64());
-
- Buffer positions = buffers.next();
- switch (columnType) {
- // for simple well-supported typedarray types, wrap and return
- case "int":
- assert positions.length().toFloat64() >= size * 4;
- Int32Array intArray = new Int32Array(TypedArrayHelper.unwrap(data).buffer,
- (int) positions.offset().toFloat64(), size);
- return new IntArrayColumnData(Js.uncheckedCast(intArray));
- case "short":
- assert positions.length().toFloat64() >= size * 2;
- Int16Array shortArray = new Int16Array(TypedArrayHelper.unwrap(data).buffer,
- (int) positions.offset().toFloat64(), size);
- return new ShortArrayColumnData(Js.uncheckedCast(shortArray));
- case "boolean":
- case "java.lang.Boolean":
- // noinspection IntegerDivisionInFloatingPointContext
- assert positions.length().toFloat64() >= ((size + 63) / 64);
- // booleans are stored as a bitset, but internally we represent booleans as bytes
- data.position((int) positions.offset().toFloat64());
- BitSet wireValues = readBitSetWithLength(data, (int) (positions.length().toFloat64()));
- Boolean[] boolArray = new Boolean[size];
- for (int i = 0; i < size; ++i) {
- if (!hasNulls || valid.get(i)) {
- boolArray[i] = wireValues.get(i);
- } else {
- boolArray[i] = null;
- }
- }
- return new BooleanArrayColumnData(boolArray);
- case "byte":
- assert positions.length().toFloat64() >= size;
- Int8Array byteArray =
- new Int8Array(TypedArrayHelper.unwrap(data).buffer, (int) positions.offset().toFloat64(), size);
- return new ByteArrayColumnData(Js.uncheckedCast(byteArray));
- case "double":
- assert positions.length().toFloat64() >= size * 8;
- Float64Array doubleArray = new Float64Array(TypedArrayHelper.unwrap(data).buffer,
- (int) positions.offset().toFloat64(), size);
- return new DoubleArrayColumnData(Js.uncheckedCast(doubleArray));
- case "float":
- assert positions.length().toFloat64() >= size * 4;
- Float32Array floatArray = new Float32Array(TypedArrayHelper.unwrap(data).buffer,
- (int) positions.offset().toFloat64(), size);
- return new FloatArrayColumnData(Js.uncheckedCast(floatArray));
- case "char":
- assert positions.length().toFloat64() >= size * 2;
- Uint16Array charArray = new Uint16Array(TypedArrayHelper.unwrap(data).buffer,
- (int) positions.offset().toFloat64(), size);
- return new CharArrayColumnData(Js.uncheckedCast(charArray));
- // longs are a special case despite being java primitives
- case "long":
- case "java.time.Instant":
- case "java.time.ZonedDateTime":
- assert positions.length().toFloat64() >= size * 8;
- long[] longArray = new long[size];
-
- data.position((int) positions.offset().toFloat64());
- for (int i = 0; i < size; i++) {
- longArray[i] = data.getLong();
- }
- return new LongArrayColumnData(longArray);
- // all other types are read out in some custom way
- case "java.time.LocalTime":// LocalDateArrayColumnData
- assert positions.length().toFloat64() >= size * 6;
- data.position((int) positions.offset().toFloat64());
- LocalDate[] localDateArray = new LocalDate[size];
- for (int i = 0; i < size; i++) {
- int year = data.getInt();
- byte month = data.get();
- byte day = data.get();
- localDateArray[i] = new LocalDate(year, month, day);
- }
- return new LocalDateArrayColumnData(localDateArray);
- case "java.time.LocalDate":// LocalTimeArrayColumnData
- assert positions.length().toFloat64() == size * 7;
- LocalTime[] localTimeArray = new LocalTime[size];
-
- data.position((int) positions.offset().toFloat64());
- for (int i = 0; i < size; i++) {
- int nano = data.getInt();
- byte hour = data.get();
- byte minute = data.get();
- byte second = data.get();
- data.position(data.position() + 1);// aligned for next read
- localTimeArray[i] = new LocalTime(hour, minute, second, nano);
- }
- return new LocalTimeArrayColumnData(localTimeArray);
- default:
- // remaining types have an offset buffer to read first
- IntBuffer offsets = readOffsets(data, size, positions);
-
- if (columnType.endsWith("[]")) {
- FieldNode arrayNode = nodes.next();
- int innerSize = (int) arrayNode.length().toFloat64();
- boolean innerHasNulls = arrayNode.nullCount().toFloat64() != 0;
-
- // array type, also read the inner valid buffer and inner offset buffer
- BitSet innerValid = readValidityBufferAsBitset(data, innerSize, buffers.next());
- IntBuffer innerOffsets = readOffsets(data, innerSize, buffers.next());
-
- Buffer payload = buffers.next();
-
- switch (columnType) {
- case "java.lang.String[]":
- String[][] strArrArr = new String[size][];
-
- for (int i = 0; i < size; i++) {
- if (hasNulls && !valid.get(i)) {
- strArrArr[i] = null;
- continue;
- }
- int arrayStart = offsets.get(i);
- int instanceSize = offsets.get(i + 1) - arrayStart;
- String[] strArr = new String[instanceSize];
- for (int j = 0; j < instanceSize; j++) {
- int inner = j + arrayStart;
- assert innerOffsets != null;
- if (innerHasNulls && !innerValid.get(inner)) {
- assert innerOffsets.get(inner) == innerOffsets.get(inner + 1)
- : innerOffsets.get(inner) + " == " + innerOffsets.get(inner + 1);
- strArr[j] = null;
- continue;
- }
- // might be cheaper to do views on the underlying bb (which will be copied anyway
- // into the String)
- data.position((int) (payload.offset().toFloat64()) + innerOffsets.get(inner));
- int stringSize = innerOffsets.get(inner + 1) - innerOffsets.get(inner);
- byte[] stringBytes = new byte[stringSize];
- data.get(stringBytes);
- strArr[j] = new String(stringBytes, StandardCharsets.UTF_8);
- }
- strArrArr[i] = strArr;
- }
-
- return new StringArrayArrayColumnData(strArrArr);
- default:
- throw new IllegalStateException("Can't decode column of type " + columnType);
- }
-
- } else {
- // non-array, variable length stuff, just grab the buffer and read ranges specified by offsets
- Buffer payload = buffers.next();
-
- switch (columnType) {
- case "java.lang.String": {
- String[] stringArray = new String[size];
- byte[] buf = new byte[32];
- for (int i = 0; i < size; i++) {
- if (hasNulls && !valid.get(i)) {
- stringArray[i] = null;
- continue;
- }
- int ioff = offsets.get(i);
- int len = offsets.get(i + 1) - ioff;
- data.position((int) (payload.offset().toFloat64()) + ioff);
- if (buf.length < len) {
- buf = new byte[len];
- }
- data.get(buf, 0, len);
- stringArray[i] = new String(buf, 0, len, StandardCharsets.UTF_8);// new
- // String(Js.uncheckedCast(stringBytes));
- }
- return new StringArrayColumnData(stringArray);
- }
- case "java.math.BigDecimal": {
- BigDecimal[] bigDecArray = new BigDecimal[size];
- byte[] buf = null;
- for (int i = 0; i < size; i++) {
- if (hasNulls && !valid.get(i)) {
- bigDecArray[i] = null;
- continue;
- }
- int ioff = offsets.get(i);
- int len = offsets.get(i + 1) - ioff;
- data.position((int) (payload.offset().toFloat64()) + ioff);
- int scale = data.getInt();
- len -= 4;
- if (buf == null || buf.length != len) {
- buf = new byte[len];
- }
- bigDecArray[i] = new BigDecimal(readBigInt(data, buf), scale);
- }
- return new BigDecimalArrayColumnData(bigDecArray);
- }
- case "java.math.BigInteger": {
- BigInteger[] bigIntArray = new BigInteger[size];
- byte[] buf = null;
- for (int i = 0; i < size; i++) {
- if (hasNulls && !valid.get(i)) {
- bigIntArray[i] = null;
- continue;
- }
- int ioff = offsets.get(i);
- int len = offsets.get(i + 1) - ioff;
- if (buf == null || buf.length != len) {
- buf = new byte[len];
- }
- data.position((int) (payload.offset().toFloat64()) + ioff);
- bigIntArray[i] = readBigInt(data, buf);
- }
- return new BigIntegerArrayColumnData(bigIntArray);
- }
- default:
- throw new IllegalStateException("Can't decode column of type " + columnType);
- }
- }
- }
- }
-
- private static BigInteger readBigInt(ByteBuffer data, byte[] buf) {
- // TODO: Change to the code below when the Java 9 BigInteger(byte[], int, int) constructor is available.
- // https://github.com/deephaven/deephaven-core/issues/1626
- // Make the call take an additional len parameter, and make the calling logic reallocate only when
- // there is a need to grow, instead of the current need for an exact match.
- //
- // data.get(buf, 0, len);
- // return new BigInteger(buf, 0, len);
- data.get(buf);
- return new BigInteger(buf);
- }
-
- private static BitSet readValidityBufferAsBitset(ByteBuffer data, int size, Buffer buffer) {
- if (size == 0 || buffer.length().toFloat64() == 0) {
- // these buffers are optional (and empty) if the column is empty, or if it has primitives and we've allowed
- // DH nulls
- return new BitSet(0);
- }
- data.position((int) buffer.offset().toFloat64());
- BitSet valid = readBitSetWithLength(data, (int) (buffer.length().toFloat64()));
- return valid;
- }
-
- private static BitSet readBitSetWithLength(ByteBuffer data, int lenInBytes) {
- byte[] array = new byte[lenInBytes];
- data.get(array);
-
- return BitSet.valueOf(array);
- }
-
- private static IntBuffer readOffsets(ByteBuffer data, int size, Buffer buffer) {
- if (size == 0) {
- IntBuffer emptyOffsets = IntBuffer.allocate(1);
- return emptyOffsets;
- }
- data.position((int) buffer.offset().toFloat64());
- IntBuffer offsets = data.slice().asIntBuffer();
- offsets.limit(size + 1);
- return offsets;
- }
-
}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebChunkReaderFactory.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebChunkReaderFactory.java
new file mode 100644
index 00000000000..d183aa491d3
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/WebChunkReaderFactory.java
@@ -0,0 +1,327 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage;
+
+import io.deephaven.base.verify.Assert;
+import io.deephaven.chunk.WritableByteChunk;
+import io.deephaven.chunk.WritableChunk;
+import io.deephaven.chunk.WritableIntChunk;
+import io.deephaven.chunk.WritableLongChunk;
+import io.deephaven.chunk.WritableObjectChunk;
+import io.deephaven.chunk.attributes.Values;
+import io.deephaven.extensions.barrage.chunk.BooleanChunkReader;
+import io.deephaven.extensions.barrage.chunk.ByteChunkReader;
+import io.deephaven.extensions.barrage.chunk.CharChunkReader;
+import io.deephaven.extensions.barrage.chunk.ChunkInputStreamGenerator;
+import io.deephaven.extensions.barrage.chunk.ChunkReader;
+import io.deephaven.extensions.barrage.chunk.DoubleChunkReader;
+import io.deephaven.extensions.barrage.chunk.FloatChunkReader;
+import io.deephaven.extensions.barrage.chunk.IntChunkReader;
+import io.deephaven.extensions.barrage.chunk.LongChunkReader;
+import io.deephaven.extensions.barrage.chunk.ShortChunkReader;
+import io.deephaven.extensions.barrage.chunk.VarListChunkReader;
+import io.deephaven.extensions.barrage.util.StreamReaderOptions;
+import io.deephaven.util.BooleanUtils;
+import io.deephaven.util.datastructures.LongSizedDataStructure;
+import io.deephaven.web.client.api.BigDecimalWrapper;
+import io.deephaven.web.client.api.BigIntegerWrapper;
+import io.deephaven.web.client.api.DateWrapper;
+import io.deephaven.web.client.api.LongWrapper;
+import org.apache.arrow.flatbuf.Date;
+import org.apache.arrow.flatbuf.DateUnit;
+import org.apache.arrow.flatbuf.FloatingPoint;
+import org.apache.arrow.flatbuf.Int;
+import org.apache.arrow.flatbuf.Precision;
+import org.apache.arrow.flatbuf.Time;
+import org.apache.arrow.flatbuf.TimeUnit;
+import org.apache.arrow.flatbuf.Timestamp;
+import org.apache.arrow.flatbuf.Type;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.PrimitiveIterator;
+
+/**
+ * Browser-compatible implementation of the ChunkReaderFactory, with a focus on reading from Arrow types rather than
+ * round-tripping data back to the Java server.
+ *
+ * Includes some specific workarounds to handle nullability in a way that makes more sense for the browser.
+ */
+public class WebChunkReaderFactory implements ChunkReader.Factory {
+ @Override
+ public ChunkReader getReader(StreamReaderOptions options, int factor, ChunkReader.TypeInfo typeInfo) {
+ switch (typeInfo.arrowField().typeType()) {
+ case Type.Int: {
+ Int t = new Int();
+ typeInfo.arrowField().type(t);
+ switch (t.bitWidth()) {
+ case 8: {
+ return new ByteChunkReader(options);
+ }
+ case 16: {
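+ // 16-bit values: signed integers map to DH shorts, unsigned integers map to DH chars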
+ if (t.isSigned()) {
+ return new ShortChunkReader(options);
+ }
+ return new CharChunkReader(options);
+ }
+ case 32: {
+ return new IntChunkReader(options);
+ }
+ case 64: {
+ if (t.isSigned()) {
+ return new LongChunkReader(options).transform(LongWrapper::of);
+ }
+ throw new IllegalArgumentException("Unsigned 64bit integers not supported");
+ }
+ default:
+ throw new IllegalArgumentException("Unsupported Int bitwidth: " + t.bitWidth());
+ }
+ }
+ case Type.FloatingPoint: {
+ FloatingPoint t = new FloatingPoint();
+ typeInfo.arrowField().type(t);
+ switch (t.precision()) {
+ case Precision.SINGLE: {
+ return new FloatChunkReader(options);
+ }
+ case Precision.DOUBLE: {
+ return new DoubleChunkReader(options);
+ }
+ default:
+ throw new IllegalArgumentException(
+ "Unsupported FloatingPoint precision " + Precision.name(t.precision()));
+ }
+ }
+ case Type.Binary: {
+ if (typeInfo.type() == BigIntegerWrapper.class) {
+ return (fieldNodeIter, bufferInfoIter, is, outChunk, outOffset,
+ totalRows) -> extractChunkFromInputStream(
+ is,
+ fieldNodeIter,
+ bufferInfoIter,
+ (val, off, len) -> new BigIntegerWrapper(new BigInteger(val, off, len)),
+ outChunk, outOffset, totalRows);
+ }
+ if (typeInfo.type() == BigDecimalWrapper.class) {
+ return (fieldNodeIter, bufferInfoIter, is, outChunk, outOffset,
+ totalRows) -> extractChunkFromInputStream(
+ is,
+ fieldNodeIter,
+ bufferInfoIter,
+ (final byte[] buf, final int offset, final int length) -> {
+ // read the int scale value as little endian, arrow's endianness.
+ final byte b1 = buf[offset];
+ final byte b2 = buf[offset + 1];
+ final byte b3 = buf[offset + 2];
+ final byte b4 = buf[offset + 3];
+ final int scale = b4 << 24 | (b3 & 0xFF) << 16 | (b2 & 0xFF) << 8 | (b1 & 0xFF);
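+ // the remaining bytes are the unscaled value, a big-endian two's-complement BigInteger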
+ BigDecimal bigDecimal =
+ new BigDecimal(new BigInteger(buf, offset + 4, length - 4), scale);
+ return new BigDecimalWrapper(bigDecimal);
+ },
+ outChunk, outOffset, totalRows);
+ }
+ throw new IllegalArgumentException("Unsupported Binary type " + typeInfo.type());
+ }
+ case Type.Utf8: {
+ return (fieldNodeIter, bufferInfoIter, is, outChunk, outOffset,
+ totalRows) -> extractChunkFromInputStream(is, fieldNodeIter,
+ bufferInfoIter, (buf, off, len) -> new String(buf, off, len, StandardCharsets.UTF_8),
+ outChunk, outOffset, totalRows);
+ }
+ case Type.Bool: {
+ BooleanChunkReader subReader = new BooleanChunkReader();
+ return (fieldNodeIter, bufferInfoIter, is, outChunk, outOffset, totalRows) -> {
+ try (final WritableByteChunk<Values> inner = (WritableByteChunk<Values>) subReader.readChunk(
+ fieldNodeIter, bufferInfoIter, is, null, 0, 0)) {
+
+ final WritableObjectChunk<Boolean, Values> chunk;
+ if (outChunk != null) {
+ chunk = outChunk.asWritableObjectChunk();
+ } else {
+ int numRows = Math.max(totalRows, inner.size());
+ chunk = WritableObjectChunk.makeWritableChunk(numRows);
+ chunk.setSize(numRows);
+ }
+
+ if (outChunk == null) {
+ // if we're not given an output chunk then we better be writing at the front of the new one
+ Assert.eqZero(outOffset, "outOffset");
+ }
+
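+ // expand each byte-encoded boolean into a nullable Boolean for the JS API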
+ for (int ii = 0; ii < inner.size(); ++ii) {
+ byte value = inner.get(ii);
+ chunk.set(outOffset + ii, BooleanUtils.byteAsBoolean(value));
+ }
+
+ return chunk;
+ }
+
+ };
+ }
+ case Type.Date: {
+ Date t = new Date();
+ typeInfo.arrowField().type(t);
+ switch (t.unit()) {
+ case DateUnit.MILLISECOND:
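+ // scale millisecond dates up to the nanosecond precision used by DateWrapper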
+ return new LongChunkReader(options).transform(millis -> DateWrapper.of(millis * 1000 * 1000));
+ default:
+ throw new IllegalArgumentException("Unsupported Date unit: " + DateUnit.name(t.unit()));
+ }
+ }
+ case Type.Time: {
+ Time t = new Time();
+ typeInfo.arrowField().type(t);
+ switch (t.bitWidth()) {
+ case TimeUnit.NANOSECOND: {
+ return new LongChunkReader(options).transform(DateWrapper::of);
+ }
+ default:
+ throw new IllegalArgumentException("Unsupported Time unit: " + TimeUnit.name(t.unit()));
+ }
+ }
+ case Type.Timestamp: {
+ Timestamp t = new Timestamp();
+ typeInfo.arrowField().type(t);
+ switch (t.unit()) {
+ case TimeUnit.NANOSECOND: {
+ if (!t.timezone().equals("UTC")) {
+ throw new IllegalArgumentException("Unsupported tz " + t.timezone());
+ }
+ return new LongChunkReader(options).transform(DateWrapper::of);
+ }
+ default:
+ throw new IllegalArgumentException("Unsupported Timestamp unit: " + TimeUnit.name(t.unit()));
+ }
+ }
+ case Type.List: {
+ if (typeInfo.componentType() == byte.class) {
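+ // byte[] columns: copy each list element's bytes straight out of the payload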
+ return (fieldNodeIter, bufferInfoIter, is, outChunk, outOffset,
+ totalRows) -> extractChunkFromInputStream(
+ is,
+ fieldNodeIter,
+ bufferInfoIter,
+ (buf, off, len) -> Arrays.copyOfRange(buf, off, off + len),
+ outChunk, outOffset, totalRows);
+ }
+ return new VarListChunkReader<>(options, typeInfo, this);
+ }
+ default:
+ throw new IllegalArgumentException("Unsupported type: " + Type.name(typeInfo.arrowField().typeType()));
+ }
+ }
+
+ public interface Mapper<T> {
+ T constructFrom(byte[] buf, int offset, int length) throws IOException;
+ }
+
+ public static <T> WritableObjectChunk<T, Values> extractChunkFromInputStream(
+ final DataInput is,
+ final Iterator<ChunkInputStreamGenerator.FieldNodeInfo> fieldNodeIter,
+ final PrimitiveIterator.OfLong bufferInfoIter,
+ final Mapper<T> mapper,
+ final WritableChunk<Values> outChunk,
+ final int outOffset,
+ final int totalRows) throws IOException {
+ final ChunkInputStreamGenerator.FieldNodeInfo nodeInfo = fieldNodeIter.next();
+ final long validityBuffer = bufferInfoIter.nextLong();
+ final long offsetsBuffer = bufferInfoIter.nextLong();
+ final long payloadBuffer = bufferInfoIter.nextLong();
+
+ final int numElements = nodeInfo.numElements;
+ final WritableObjectChunk<T, Values> chunk;
+ if (outChunk != null) {
+ chunk = outChunk.asWritableObjectChunk();
+ } else {
+ final int numRows = Math.max(totalRows, numElements);
+ chunk = WritableObjectChunk.makeWritableChunk(numRows);
+ chunk.setSize(numRows);
+ }
+
+ if (numElements == 0) {
+ return chunk;
+ }
+
+ final int numValidityWords = (numElements + 63) / 64;
+ try (final WritableLongChunk<Values> isValid = WritableLongChunk.makeWritableChunk(numValidityWords);
+ final WritableIntChunk<Values> offsets = WritableIntChunk.makeWritableChunk(numElements + 1)) {
+ // Read validity buffer:
+ int jj = 0;
+ for (; jj < Math.min(numValidityWords, validityBuffer / 8); ++jj) {
+ isValid.set(jj, is.readLong());
+ }
+ final long valBufRead = jj * 8L;
+ if (valBufRead < validityBuffer) {
+ is.skipBytes(LongSizedDataStructure.intSize("VBCISG", validityBuffer - valBufRead));
+ }
+ // we support short validity buffers
+ for (; jj < numValidityWords; ++jj) {
+ isValid.set(jj, -1); // -1 is bit-wise representation of all ones
+ }
+
+ // Read offsets:
+ final long offBufRead = (numElements + 1L) * Integer.BYTES;
+ if (offsetsBuffer < offBufRead) {
+ throw new IllegalStateException("offset buffer is too short for the expected number of elements");
+ }
+ for (int i = 0; i < numElements + 1; ++i) {
+ offsets.set(i, is.readInt());
+ }
+ if (offBufRead < offsetsBuffer) {
+ is.skipBytes(LongSizedDataStructure.intSize("VBCISG", offsetsBuffer - offBufRead));
+ }
+
+ // Read data:
+ final int bytesRead = LongSizedDataStructure.intSize("VBCISG", payloadBuffer);
+ final byte[] serializedData = new byte[bytesRead];
+ is.readFully(serializedData);
+
+ // Deserialize:
+ int ei = 0;
+ int pendingSkips = 0;
+
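+ // walk the validity bitset one 64-bit word at a time; runs of clear bits become null-filled spans in the chunk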
+ for (int vi = 0; vi < numValidityWords; ++vi) {
+ int bitsLeftInThisWord = Math.min(64, numElements - vi * 64);
+ long validityWord = isValid.get(vi);
+ do {
+ if ((validityWord & 1) == 1) {
+ if (pendingSkips > 0) {
+ chunk.fillWithNullValue(outOffset + ei, pendingSkips);
+ ei += pendingSkips;
+ pendingSkips = 0;
+ }
+ final int offset = offsets.get(ei);
+ final int length = offsets.get(ei + 1) - offset;
+ Assert.geq(length, "length", 0);
+ if (offset + length > serializedData.length) {
+ throw new IllegalStateException("not enough data was serialized to parse this element: " +
+ "elementIndex=" + ei + " offset=" + offset + " length=" + length +
+ " serializedLen=" + serializedData.length);
+ }
+ chunk.set(outOffset + ei++, mapper.constructFrom(serializedData, offset, length));
+ validityWord >>= 1;
+ bitsLeftInThisWord--;
+ } else {
+ final int skips = Math.min(Long.numberOfTrailingZeros(validityWord), bitsLeftInThisWord);
+ pendingSkips += skips;
+ validityWord >>= skips;
+ bitsLeftInThisWord -= skips;
+ }
+ } while (bitsLeftInThisWord > 0);
+ }
+
+ if (pendingSkips > 0) {
+ chunk.fillWithNullValue(outOffset + ei, pendingSkips);
+ }
+ }
+
+ return chunk;
+ }
+
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebBarrageSubscription.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebBarrageSubscription.java
new file mode 100644
index 00000000000..a46d8ec7f90
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebBarrageSubscription.java
@@ -0,0 +1,515 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.Chunk;
+import io.deephaven.chunk.ChunkType;
+import io.deephaven.chunk.attributes.Values;
+import io.deephaven.web.client.api.barrage.WebBarrageMessage;
+import io.deephaven.web.client.api.barrage.def.InitialTableDefinition;
+import io.deephaven.web.client.state.ClientTableState;
+import io.deephaven.web.shared.data.Range;
+import io.deephaven.web.shared.data.RangeSet;
+import io.deephaven.web.shared.data.ShiftedRange;
+import jsinterop.base.Any;
+
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.NavigableSet;
+import java.util.NoSuchElementException;
+import java.util.PrimitiveIterator;
+import java.util.TreeMap;
+
+/**
+ * In contrast to the server implementation, the JS API holds the "table" as distinct from the "subscription", so that
+ * developers are acutely aware of extra async costs in requesting data, and can clearly indicate how much data is
+ * requested. This class represents a barrage subscription for the JS API, and exposes access to the data presently
+ * available on the client.
+ *
+ * This is a rough analog to {@link io.deephaven.extensions.barrage.table.BarrageTable} and its subtypes, but isn't
+ * directly exposed to API consumers. Instead, the subscription types wrap this, and delegate their data storage and
+ * snapshot/delta handling here.
+ */
+public abstract class WebBarrageSubscription {
+
+ public static final boolean COLUMNS_AS_LIST = false;
+ public static final int MAX_MESSAGE_SIZE = 10_000_000;
+ public static final int BATCH_SIZE = 100_000;
+
+ public static WebBarrageSubscription subscribe(ClientTableState cts, ViewportChangedHandler viewportChangedHandler,
+ DataChangedHandler dataChangedHandler) {
+
+ WebColumnData[] dataSinks = new WebColumnData[cts.columnTypes().length];
+ ChunkType[] chunkTypes = cts.chunkTypes();
+ for (int i = 0; i < dataSinks.length; i++) {
+ switch (chunkTypes[i]) {
+ case Boolean:
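+ // boolean columns are delivered as nullable objects (see WebChunkReaderFactory), so no Boolean chunk is expected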
+ throw new IllegalStateException("Boolean unsupported here");
+ case Char:
+ dataSinks[i] = new WebCharColumnData();
+ break;
+ case Byte:
+ dataSinks[i] = new WebByteColumnData();
+ break;
+ case Short:
+ dataSinks[i] = new WebShortColumnData();
+ break;
+ case Int:
+ dataSinks[i] = new WebIntColumnData();
+ break;
+ case Long:
+ dataSinks[i] = new WebLongColumnData();
+ break;
+ case Float:
+ dataSinks[i] = new WebFloatColumnData();
+ break;
+ case Double:
+ dataSinks[i] = new WebDoubleColumnData();
+ break;
+ case Object:
+ dataSinks[i] = new WebObjectColumnData();
+ break;
+ }
+ }
+
+ if (cts.getTableDef().getAttributes().isBlinkTable()) {
+ return new BlinkImpl(cts, viewportChangedHandler, dataChangedHandler, dataSinks);
+ }
+ return new RedirectedImpl(cts, viewportChangedHandler, dataChangedHandler, dataSinks);
+ }
+
+ public interface ViewportChangedHandler {
+ void onServerViewportChanged(RangeSet serverViewport, BitSet serverColumns, boolean serverReverseViewport);
+ }
+ public interface DataChangedHandler {
+ void onDataChanged(RangeSet rowsAdded, RangeSet rowsRemoved, RangeSet totalMods, ShiftedRange[] shifted,
+ BitSet modifiedColumnSet);
+ }
+
+ protected final ClientTableState state;
+ protected final ViewportChangedHandler viewportChangedHandler;
+ protected final DataChangedHandler dataChangedHandler;
+ protected final RangeSet currentRowSet = RangeSet.empty();
+
+ protected long capacity = 0;
+ protected WebColumnData[] destSources;
+
+ protected RangeSet serverViewport;
+ protected BitSet serverColumns;
+ protected boolean serverReverseViewport;
+
+ protected WebBarrageSubscription(ClientTableState state, ViewportChangedHandler viewportChangedHandler,
+ DataChangedHandler dataChangedHandler, WebColumnData[] dataSinks) {
+ this.state = state;
+ destSources = dataSinks;
+ this.viewportChangedHandler = viewportChangedHandler;
+ this.dataChangedHandler = dataChangedHandler;
+ }
+
+ public abstract void applyUpdates(WebBarrageMessage message);
+
+ protected void updateServerViewport(RangeSet viewport, BitSet columns, boolean reverseViewport) {
+ serverViewport = viewport;
+ serverColumns = columns == null || columns.cardinality() == numColumns() ? null : columns;
+ serverReverseViewport = reverseViewport;
+ }
+
+ protected int numColumns() {
+ return getDefinition().getColumns().length;
+ }
+
+ private InitialTableDefinition getDefinition() {
+ return state.getTableDef();
+ }
+
+ public RangeSet getCurrentRowSet() {
+ return currentRowSet;
+ }
+
+ public RangeSet getServerViewport() {
+ return serverViewport;
+ }
+
+ public boolean isReversed() {
+ return serverReverseViewport;
+ }
+
+ /**
+ * Reads a value from the table subscription.
+ *
+ * @param key the row to read in key-space
+ * @param col the index of the column to read
+ * @return the value read from the table
+ */
+ public abstract Any getData(long key, int col);
+
+ protected boolean isSubscribedColumn(int ii) {
+ return serverColumns == null || serverColumns.get(ii);
+ }
+
+ public static class BlinkImpl extends WebBarrageSubscription {
+ enum Mode {
+ BLINK, APPEND
+ }
+
+ private final Mode mode;
+
+ public BlinkImpl(ClientTableState state, ViewportChangedHandler viewportChangedHandler,
+ DataChangedHandler dataChangedHandler, WebColumnData[] dataSinks) {
+ super(state, viewportChangedHandler, dataChangedHandler, dataSinks);
+ mode = Mode.BLINK;
+ }
+
+ @Override
+ public void applyUpdates(WebBarrageMessage message) {
+ if (message.isSnapshot) {
+ updateServerViewport(message.snapshotRowSet, message.snapshotColumns, message.snapshotRowSetIsReversed);
+ viewportChangedHandler.onServerViewportChanged(serverViewport, serverColumns, serverReverseViewport);
+ }
+
+ assert message.shifted.length == 0;
+ for (int i = 0; i < message.modColumnData.length; i++) {
+ assert message.modColumnData[i].rowsModified.isEmpty();
+ }
+
+ if (message.rowsIncluded.isEmpty()) {
+ return;
+ }
+
+ long addedRows = message.rowsIncluded.size();
+ RangeSet destinationRowSet;
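+ // APPEND keeps growing the destination sources; BLINK reuses slots starting at row 0 each cycle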
+ if (mode == Mode.APPEND) {
+ destinationRowSet = RangeSet.ofRange(capacity, capacity + addedRows - 1);
+ capacity += addedRows;
+ } else {
+ destinationRowSet = RangeSet.ofRange(0, addedRows - 1);
+ capacity = addedRows;
+ }
+ Arrays.stream(destSources).forEach(s -> s.ensureCapacity(capacity));
+ for (int ii = 0; ii < message.addColumnData.length; ii++) {
+ if (isSubscribedColumn(ii)) {
+ WebBarrageMessage.AddColumnData column = message.addColumnData[ii];
+ PrimitiveIterator.OfLong destIterator = destinationRowSet.indexIterator();
+ for (int j = 0; j < column.data.size(); j++) {
+ Chunk<Values> chunk = column.data.get(j);
+ destSources[ii].fillChunk(chunk, destIterator);
+ }
+ assert !destIterator.hasNext();
+ }
+ }
+
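+ // a blink table only retains the rows delivered in this cycle; prior contents are discarded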
+ currentRowSet.clear();
+
+ currentRowSet.addRangeSet(message.rowsAdded);
+ state.setSize(message.rowsAdded.size());
+ dataChangedHandler.onDataChanged(message.rowsAdded, message.rowsRemoved, RangeSet.empty(), message.shifted,
+ new BitSet(0));
+ }
+
+ @Override
+ public Any getData(long key, int col) {
+ if (!isSubscribedColumn(col)) {
+ throw new NoSuchElementException("No column at index " + col);
+ }
+ return destSources[col].get(key);
+ }
+ }
+
+ public static class RedirectedImpl extends WebBarrageSubscription {
+ private RangeSet freeset = new RangeSet();
+ private final TreeMap<Long, Long> redirectedIndexes = new TreeMap<>();
+
+ public RedirectedImpl(ClientTableState state, ViewportChangedHandler viewportChangedHandler,
+ DataChangedHandler dataChangedHandler, WebColumnData[] dataSinks) {
+ super(state, viewportChangedHandler, dataChangedHandler, dataSinks);
+ }
+
+ @Override
+ public void applyUpdates(WebBarrageMessage message) {
+ RangeSet populatedRows = serverViewport != null
+ ? currentRowSet.subsetForPositions(serverViewport, serverReverseViewport)
+ : null;
+
+ if (message.isSnapshot) {
+ updateServerViewport(message.snapshotRowSet, message.snapshotColumns, message.snapshotRowSetIsReversed);
+ viewportChangedHandler.onServerViewportChanged(serverViewport, serverColumns, serverReverseViewport);
+ }
+
+ final boolean mightBeInitialSnapshot = getCurrentRowSet().isEmpty() && message.isSnapshot;
+
+ // Apply removes to our local rowset
+ currentRowSet.removeRangeSet(message.rowsRemoved);
+
+ RangeSet removed = message.rowsRemoved;
+ if (populatedRows != null) {
+ // limit the removed rows to what intersect the viewport
+ removed = populatedRows.extract(message.rowsRemoved);
+ }
+ // free rows that are no longer needed
+ freeRows(removed);
+
+ // Apply shifts
+
+ // Shift moved rows in the redir index
+ boolean hasReverseShift = false;
+ final ShiftedRange[] shiftedRanges = message.shifted;
+ currentRowSet.applyShifts(shiftedRanges);
+ RangeSetBulkHelper populatedRowsetShifter = populatedRows == null ? null
+ : new RangeSetBulkHelper(populatedRows, RangeSetBulkHelper.Operation.APPEND);
+ for (int i = shiftedRanges.length - 1; i >= 0; --i) {
+ final ShiftedRange shiftedRange = shiftedRanges[i];
+ final long offset = shiftedRange.getDelta();
+ if (offset < 0) {
+ hasReverseShift = true;
+ continue;
+ }
+
+ // test if shift is in populatedRows before continuing
+ if (populatedRows != null) {
+ if (!populatedRows.includesAnyOf(shiftedRange.getRange())) {
+ // no rows were included, we can skip updating populatedRows and redirectedIndexes
+ continue;
+ }
+ populatedRows.removeRange(shiftedRange.getRange());
+ }
+ final NavigableSet<Long> toMove = redirectedIndexes.navigableKeySet()
+ .subSet(shiftedRange.getRange().getFirst(), true, shiftedRange.getRange().getLast(), true);
+ // iterate backward and move them forward
+ for (Long key : toMove.descendingSet()) {
+ long shiftedKey = key + offset;
+ Long oldValue = redirectedIndexes.put(shiftedKey, redirectedIndexes.remove(key));
+ assert oldValue == null : shiftedKey + " already has a value, " + oldValue;
+ if (populatedRowsetShifter != null) {
+ populatedRowsetShifter.append(shiftedKey);
+ }
+ }
+ }
+
+ if (hasReverseShift) {
+ for (int i = 0; i < shiftedRanges.length; ++i) {
+ final ShiftedRange shiftedRange = shiftedRanges[i];
+ final long offset = shiftedRange.getDelta();
+ if (offset > 0) {
+ continue;
+ }
+
+ if (populatedRows != null) {
+ if (!populatedRows.includesAnyOf(shiftedRange.getRange())) {
+ // no rows were included, we can skip updating populatedRows and redirectedIndexes
+ continue;
+ }
+ populatedRows.removeRange(shiftedRange.getRange());
+ }
+ final NavigableSet<Long> toMove = redirectedIndexes.navigableKeySet()
+ .subSet(shiftedRange.getRange().getFirst(), true, shiftedRange.getRange().getLast(), true);
+ // iterate forward and move them backward
+ for (Long key : toMove) {
+ long shiftedKey = key + offset;
+ Long oldValue = redirectedIndexes.put(shiftedKey, redirectedIndexes.remove(key));
+ assert oldValue == null : shiftedKey + " already has a value, " + oldValue;
+ if (populatedRowsetShifter != null) {
+ populatedRowsetShifter.append(shiftedKey);
+ }
+ }
+ }
+ }
+ if (populatedRowsetShifter != null) {
+ populatedRowsetShifter.flush();
+ }
+
+ currentRowSet.addRangeSet(message.rowsAdded);
+
+ RangeSet totalMods = new RangeSet();
+ for (int i = 0; i < message.modColumnData.length; i++) {
+ WebBarrageMessage.ModColumnData column = message.modColumnData[i];
+ totalMods.addRangeSet(column.rowsModified);
+ }
+
+ if (!message.rowsIncluded.isEmpty()) {
+ if (mightBeInitialSnapshot) {
+ capacity = message.rowsIncluded.size();
+ Arrays.stream(destSources).forEach(s -> s.ensureCapacity(capacity));
+ freeset.addRange(new Range(0, capacity - 1));
+ }
+
+ RangeSet destinationRowSet = getFreeRows(message.rowsIncluded.size());
+
+ for (int ii = 0; ii < message.addColumnData.length; ii++) {
+ if (isSubscribedColumn(ii)) {
+ WebBarrageMessage.AddColumnData column = message.addColumnData[ii];
+ PrimitiveIterator.OfLong destIterator = destinationRowSet.indexIterator();
+
+ for (int j = 0; j < column.data.size(); j++) {
+ Chunk<Values> chunk = column.data.get(j);
+ destSources[ii].fillChunk(chunk, destIterator);
+ }
+ assert !destIterator.hasNext();
+ }
+ }
+ // Add redirection mappings
+ PrimitiveIterator.OfLong srcIter = message.rowsIncluded.indexIterator();
+ PrimitiveIterator.OfLong destIter = destinationRowSet.indexIterator();
+ while (srcIter.hasNext()) {
+ assert destIter.hasNext();
+ redirectedIndexes.put(srcIter.next(), destIter.next());
+ }
+ assert !destIter.hasNext();
+ }
+
+ BitSet modifiedColumnSet = new BitSet(numColumns());
+ for (int ii = 0; ii < message.modColumnData.length; ii++) {
+ WebBarrageMessage.ModColumnData column = message.modColumnData[ii];
+ if (column.rowsModified.isEmpty()) {
+ continue;
+ }
+
+ modifiedColumnSet.set(ii);
+
+ PrimitiveIterator.OfLong destIterator = column.rowsModified.indexIterator();
+ for (int j = 0; j < column.data.size(); j++) {
+ Chunk<Values> chunk = column.data.get(j);
+ destSources[ii].fillChunk(chunk, destIterator);
+ }
+ assert !destIterator.hasNext();
+ }
+ if (serverViewport != null && populatedRows != null) {
+ RangeSet newPopulated = currentRowSet.subsetForPositions(serverViewport, serverReverseViewport);
+ populatedRows.removeRangeSet(newPopulated);
+ freeRows(populatedRows);
+ }
+
+ state.setSize(currentRowSet.size());
+ dataChangedHandler.onDataChanged(message.rowsAdded, removed, totalMods, message.shifted,
+ modifiedColumnSet);
+ }
+
+ @Override
+ public Any getData(long key, int col) {
+ if (!isSubscribedColumn(col)) {
+ throw new NoSuchElementException("No column at index " + col);
+ }
+ return this.destSources[col].get(redirectedIndexes.get(key));
+ }
+
+ private RangeSet getFreeRows(long size) {
+ if (size <= 0) {
+ return RangeSet.empty();
+ }
+ boolean needsResizing = false;
+ final RangeSet result;
+ if (capacity == 0) {
+ capacity = Long.highestOneBit(Math.max(size * 2, 8));
+ freeset.addRange(new Range(0, capacity - 1));
+ needsResizing = true;
+ } else {
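+ // grow capacity by doubling until enough slots are free to satisfy this request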
+ if (freeset.size() < size) {
+ long usedSlots = capacity - freeset.size();
+ long prevCapacity = capacity;
+
+ do {
+ capacity *= 2;
+ } while ((capacity - usedSlots) < size);
+ freeset.addRange(new Range(prevCapacity, capacity - 1));
+ needsResizing = true;
+ }
+ }
+ if (needsResizing) {
+ Arrays.stream(destSources).forEach(s -> s.ensureCapacity(capacity));
+ }
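+ // claim the lowest free slots for this batch and remove them from the free set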
+ result = freeset.subsetForPositions(RangeSet.ofRange(0, size - 1), false);
+ freeset.removeRange(new Range(0, result.getLastRow()));
+ assert result.size() == size : result.size() + " == " + size;
+ return result;
+ }
+
+ private void freeRows(RangeSet removed) {
+ RangeSetBulkHelper reusableHelper = new RangeSetBulkHelper(freeset, RangeSetBulkHelper.Operation.APPEND);
+ removed.indexIterator().forEachRemaining((long index) -> {
+ Long dest = redirectedIndexes.remove(index);
+ if (dest != null) {
+ reusableHelper.append(dest);
+ }
+ });
+ reusableHelper.flush();
+ }
+ }
+
+ /**
+ * Helper to avoid making many small addRange/removeRange calls when modifying indexes. Call append() for each key;
+ * contiguous keys are coalesced into a single range. When no more items will be added, flush() must be called.
+ */
+ private static class RangeSetBulkHelper {
+ enum Operation {
+ APPEND, REMOVE
+ }
+
+ private final RangeSet rangeSet;
+ private final Operation operation;
+
+ private long currentFirst = -1;
+ private long currentLast;
+
+ public RangeSetBulkHelper(final RangeSet rangeSet, Operation operation) {
+ this.rangeSet = rangeSet;
+ this.operation = operation;
+ }
+
+ public void append(long key) {
+ assert key >= 0;
+
+ if (currentFirst == -1) {
+ // first key to be added, move both first and last
+ currentFirst = key;
+ currentLast = key;
+ } else if (key == currentLast + 1) {
+ // key appends to our current range
+ currentLast = key;
+ } else if (key == currentFirst - 1) {
+ // key appends to our current range
+ currentFirst = key;
+ } else {
+ // existing range doesn't match the new item, finish the old range and start a new one
+ if (operation == Operation.APPEND) {
+ rangeSet.addRange(new Range(currentFirst, currentLast));
+ } else {
+ rangeSet.removeRange(new Range(currentFirst, currentLast));
+ }
+ currentFirst = key;
+ currentLast = key;
+ }
+ }
+
+ public void appendRange(Range range) {
+ if (currentFirst == -1) {
+ currentFirst = range.getFirst();
+ currentLast = range.getLast();
+ } else if (range.getFirst() == currentLast + 1) {
+ currentLast = range.getLast();
+ } else if (range.getLast() == currentFirst - 1) {
+ currentFirst = range.getFirst();
+ } else {
+ if (operation == Operation.APPEND) {
+ rangeSet.addRange(new Range(currentFirst, currentLast));
+ } else {
+ rangeSet.removeRange(new Range(currentFirst, currentLast));
+ }
+ currentFirst = range.getFirst();
+ currentLast = range.getLast();
+ }
+ }
+
+ public void flush() {
+ if (currentFirst != -1) {
+ if (operation == Operation.APPEND) {
+ rangeSet.addRange(new Range(currentFirst, currentLast));
+ } else {
+ rangeSet.removeRange(new Range(currentFirst, currentLast));
+ }
+ currentFirst = -1;
+ }
+ }
+ }
+
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebByteColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebByteColumnData.java
new file mode 100644
index 00000000000..4f0593d05a9
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebByteColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+// ****** AUTO-GENERATED CLASS - DO NOT EDIT MANUALLY
+// ****** Edit WebCharColumnData and run "./gradlew replicateBarrageUtils" to regenerate
+//
+// @formatter:off
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.ByteChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebByteColumnData extends WebColumnData {
+ @Override
+ public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+ ByteChunk<?> byteChunk = data.asByteChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ byte value = byteChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_BYTE ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebCharColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebCharColumnData.java
new file mode 100644
index 00000000000..dfe561d90df
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebCharColumnData.java
@@ -0,0 +1,23 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.CharChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebCharColumnData extends WebColumnData {
+ @Override
+ public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+ CharChunk<?> charChunk = data.asCharChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ char value = charChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_CHAR ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebColumnData.java
new file mode 100644
index 00000000000..3c8fee323b4
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage.data;
+
+import elemental2.core.JsArray;
+import io.deephaven.chunk.Chunk;
+import jsinterop.base.Any;
+
+import java.util.PrimitiveIterator;
+
+/**
+ * Holds data from or intended for web clients, normalizing over nulls, with helpers to handle typed chunks.
+ */
+public abstract class WebColumnData {
+ protected final JsArray<Any> arr = new JsArray<>();
+
+ public abstract void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator);
+
+ public void ensureCapacity(long size) {
+ // Current impl does nothing, js arrays don't behave better when told the size up front
+ }
+
+ public Any get(long position) {
+ return arr.getAt((int) position);
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebDoubleColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebDoubleColumnData.java
new file mode 100644
index 00000000000..d2c8e764ce7
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebDoubleColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+// ****** AUTO-GENERATED CLASS - DO NOT EDIT MANUALLY
+// ****** Edit WebCharColumnData and run "./gradlew replicateBarrageUtils" to regenerate
+//
+// @formatter:off
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.DoubleChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebDoubleColumnData extends WebColumnData {
+ @Override
+ public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+ DoubleChunk<?> doubleChunk = data.asDoubleChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ double value = doubleChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_DOUBLE ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebFloatColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebFloatColumnData.java
new file mode 100644
index 00000000000..a624affbda6
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebFloatColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+// ****** AUTO-GENERATED CLASS - DO NOT EDIT MANUALLY
+// ****** Edit WebCharColumnData and run "./gradlew replicateBarrageUtils" to regenerate
+//
+// @formatter:off
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.FloatChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebFloatColumnData extends WebColumnData {
+ @Override
+ public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+ FloatChunk<?> floatChunk = data.asFloatChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ float value = floatChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_FLOAT ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebIntColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebIntColumnData.java
new file mode 100644
index 00000000000..996cf43c6a8
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebIntColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+// ****** AUTO-GENERATED CLASS - DO NOT EDIT MANUALLY
+// ****** Edit WebCharColumnData and run "./gradlew replicateBarrageUtils" to regenerate
+//
+// @formatter:off
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.IntChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebIntColumnData extends WebColumnData {
+ @Override
+    public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+        IntChunk<?> intChunk = data.asIntChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ int value = intChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_INT ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebLongColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebLongColumnData.java
new file mode 100644
index 00000000000..080c05e6034
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebLongColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+// ****** AUTO-GENERATED CLASS - DO NOT EDIT MANUALLY
+// ****** Edit WebCharColumnData and run "./gradlew replicateBarrageUtils" to regenerate
+//
+// @formatter:off
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.LongChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebLongColumnData extends WebColumnData {
+ @Override
+    public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+        LongChunk<?> longChunk = data.asLongChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ long value = longChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_LONG ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebObjectColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebObjectColumnData.java
new file mode 100644
index 00000000000..251bca22e67
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebObjectColumnData.java
@@ -0,0 +1,22 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.Chunk;
+import io.deephaven.chunk.ObjectChunk;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebObjectColumnData extends WebColumnData {
+ @Override
+    public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+        ObjectChunk<?, ?> objectChunk = data.asObjectChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ Object value = objectChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebShortColumnData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebShortColumnData.java
new file mode 100644
index 00000000000..328a0f654a4
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/data/WebShortColumnData.java
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+// ****** AUTO-GENERATED CLASS - DO NOT EDIT MANUALLY
+// ****** Edit WebCharColumnData and run "./gradlew replicateBarrageUtils" to regenerate
+//
+// @formatter:off
+package io.deephaven.web.client.api.barrage.data;
+
+import io.deephaven.chunk.ShortChunk;
+import io.deephaven.chunk.Chunk;
+import io.deephaven.util.QueryConstants;
+import jsinterop.base.Js;
+
+import java.util.PrimitiveIterator;
+
+public class WebShortColumnData extends WebColumnData {
+ @Override
+    public void fillChunk(Chunk<?> data, PrimitiveIterator.OfLong destIterator) {
+        ShortChunk<?> shortChunk = data.asShortChunk();
+ int i = 0;
+ while (destIterator.hasNext()) {
+ short value = shortChunk.get(i++);
+ arr.setAt((int) destIterator.nextLong(), value == QueryConstants.NULL_SHORT ? null : Js.asAny(value));
+ }
+ }
+}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/def/ColumnDefinition.java b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/def/ColumnDefinition.java
index 604f80fcd77..377c09d840d 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/def/ColumnDefinition.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/barrage/def/ColumnDefinition.java
@@ -4,108 +4,101 @@
package io.deephaven.web.client.api.barrage.def;
import io.deephaven.web.client.api.Column;
+import org.apache.arrow.flatbuf.Field;
import java.util.Map;
+import static io.deephaven.web.client.api.barrage.WebBarrageUtils.keyValuePairs;
+
public class ColumnDefinition {
- private int columnIndex;
- private String name;
- private String type;
-
- private boolean isSortable;
-
- private String styleColumn;
- private String formatColumn;
-
- private boolean isStyleColumn;
- private boolean isFormatColumn;
- private boolean isNumberFormatColumn;
- private boolean isPartitionColumn;
- private boolean isHierarchicalExpandByColumn;
- private boolean isHierarchicalRowDepthColumn;
- private boolean isHierarchicalRowExpandedColumn;
- private boolean isRollupAggregatedNodeColumn;
- private boolean isRollupConstituentNodeColumn;
- private boolean isRollupGroupByColumn;
- private String rollupAggregationInputColumn;
-
- // Indicates that this is a style column for the row
- private boolean forRow;
- private boolean isInputTableKeyColumn;
- private String description;
+ private final Field field;
+ private final int columnIndex;
+ private final String type;
+
+ private final boolean isSortable;
+
+ private final String styleColumn;
+ private final String formatColumn;
+
+ private final boolean isStyleColumn;
+ private final boolean isFormatColumn;
+ private final boolean isPartitionColumn;
+ private final boolean isHierarchicalExpandByColumn;
+ private final boolean isHierarchicalRowDepthColumn;
+ private final boolean isHierarchicalRowExpandedColumn;
+ private final boolean isRollupAggregatedNodeColumn;
+ private final boolean isRollupConstituentNodeColumn;
+ private final boolean isRollupGroupByColumn;
+ private final String rollupAggregationInputColumn;
+
+ // Indicates that this is a style column for the whole row
+ private final boolean forRow;
+ private final boolean isInputTableKeyColumn;
+ private final String description;
+
+ public ColumnDefinition(int index, Field field) {
+        Map<String, String> fieldMetadata =
+ keyValuePairs("deephaven:", field.customMetadataLength(), field::customMetadata);
+ this.field = field;
+ columnIndex = index;
+ type = fieldMetadata.get("type");
+ isSortable = "true".equals(fieldMetadata.get("isSortable"));
+ isStyleColumn = "true".equals(fieldMetadata.get("isStyle"));
+ isFormatColumn = "true".equals(fieldMetadata.get("isDateFormat"))
+ || "true".equals(fieldMetadata.get("isNumberFormat"));
+ forRow = "true".equals(fieldMetadata.get("isRowStyle"));
+
+ String formatColumnName = fieldMetadata.get("dateFormatColumn");
+ if (formatColumnName == null) {
+ formatColumnName = fieldMetadata.get("numberFormatColumn");
+ }
+ formatColumn = formatColumnName;
- public String getName() {
- return name;
- }
+ styleColumn = fieldMetadata.get("styleColumn");
- public int getColumnIndex() {
- return columnIndex;
+ isInputTableKeyColumn = "true".equals(fieldMetadata.get("inputtable.isKey"));
+
+ this.description = fieldMetadata.get("description");
+
+ isPartitionColumn = "true".equals(fieldMetadata.get("isPartitioning"));
+
+ isHierarchicalExpandByColumn = "true".equals(fieldMetadata.get("hierarchicalTable.isExpandByColumn"));
+ isHierarchicalRowDepthColumn = "true".equals(fieldMetadata.get("hierarchicalTable.isRowDepthColumn"));
+ isHierarchicalRowExpandedColumn = "true".equals(fieldMetadata.get("hierarchicalTable.isRowExpandedColumn"));
+ isRollupAggregatedNodeColumn = "true".equals(fieldMetadata.get("rollupTable.isAggregatedNodeColumn"));
+ isRollupConstituentNodeColumn = "true".equals(fieldMetadata.get("rollupTable.isConstituentNodeColumn"));
+ isRollupGroupByColumn = "true".equals(fieldMetadata.get("rollupTable.isGroupByColumn"));
+ rollupAggregationInputColumn = fieldMetadata.get("rollupTable.aggregationInputColumnName");
}
- public void setColumnIndex(int columnIndex) {
- this.columnIndex = columnIndex;
+ public String getName() {
+ return field.name();
}
- public void setName(String name) {
- this.name = name;
+ public int getColumnIndex() {
+ return columnIndex;
}
public String getType() {
return type;
}
- public void setType(String type) {
- this.type = type;
- }
-
public boolean isSortable() {
return isSortable;
}
- public void setIsSortable(boolean sortable) {
- isSortable = sortable;
- }
-
public boolean isStyleColumn() {
return isStyleColumn;
}
- public void setStyleColumn(boolean styleColumn) {
- isStyleColumn = styleColumn;
- }
-
public boolean isFormatColumn() {
return isFormatColumn;
}
- public void setFormatColumn(boolean formatColumn) {
- isFormatColumn = formatColumn;
- }
-
- /**
- * @deprecated Use {@link #isFormatColumn()}
- */
- @Deprecated
- public boolean isNumberFormatColumn() {
- return isNumberFormatColumn;
- }
-
- /**
- * @deprecated Use {@link #setFormatColumn(boolean)}
- */
- @Deprecated
- public void setNumberFormatColumn(boolean numberFormatColumn) {
- isNumberFormatColumn = numberFormatColumn;
- }
-
public boolean isPartitionColumn() {
return isPartitionColumn;
}
- public void setPartitionColumn(boolean partitionColumn) {
- isPartitionColumn = partitionColumn;
- }
-
public boolean isVisible() {
return !isStyleColumn() && !isFormatColumn() && !isRollupConstituentNodeColumn()
&& !isHierarchicalRowDepthColumn() && !isHierarchicalRowExpandedColumn();
@@ -115,38 +108,18 @@ public boolean isForRow() {
return forRow;
}
- public void setForRow(boolean forRow) {
- this.forRow = forRow;
- }
-
public String getFormatColumnName() {
return formatColumn;
}
- public void setFormatColumnName(String formatColumn) {
- this.formatColumn = formatColumn;
- }
-
public String getStyleColumnName() {
return styleColumn;
}
- public void setStyleColumnName(String styleColumn) {
- this.styleColumn = styleColumn;
- }
-
- public void setInputTableKeyColumn(boolean inputTableKeyColumn) {
- this.isInputTableKeyColumn = inputTableKeyColumn;
- }
-
public boolean isInputTableKeyColumn() {
return isInputTableKeyColumn;
}
- public void setDescription(String description) {
- this.description = description;
- }
-
public String getDescription() {
return description;
}
@@ -161,10 +134,10 @@ public Column makeJsColumn(int index, Map
return makeColumn(index,
this,
- format == null || !format.isNumberFormatColumn() ? null : format.getColumnIndex(),
+ style == null ? null : style.getColumnIndex(),
style == null ? null : style.getColumnIndex(),
isPartitionColumn(),
- format == null || format.isNumberFormatColumn() ? null : format.getColumnIndex(),
+ format == null || format.isFormatColumn() ? null : format.getColumnIndex(),
getDescription(),
isInputTableKeyColumn());
}
@@ -181,55 +154,28 @@ public boolean isHierarchicalExpandByColumn() {
return isHierarchicalExpandByColumn;
}
- public void setHierarchicalExpandByColumn(boolean hierarchicalExpandByColumn) {
- isHierarchicalExpandByColumn = hierarchicalExpandByColumn;
- }
-
public boolean isHierarchicalRowDepthColumn() {
return isHierarchicalRowDepthColumn;
}
- public void setHierarchicalRowDepthColumn(boolean hierarchicalRowDepthColumn) {
- isHierarchicalRowDepthColumn = hierarchicalRowDepthColumn;
- }
-
public boolean isHierarchicalRowExpandedColumn() {
return isHierarchicalRowExpandedColumn;
}
- public void setHierarchicalRowExpandedColumn(boolean hierarchicalRowExpandedColumn) {
- isHierarchicalRowExpandedColumn = hierarchicalRowExpandedColumn;
- }
-
public boolean isRollupAggregatedNodeColumn() {
return isRollupAggregatedNodeColumn;
}
- public void setRollupAggregatedNodeColumn(boolean rollupAggregatedNodeColumn) {
- isRollupAggregatedNodeColumn = rollupAggregatedNodeColumn;
- }
-
public boolean isRollupConstituentNodeColumn() {
return isRollupConstituentNodeColumn;
}
- public void setRollupConstituentNodeColumn(boolean rollupConstituentNodeColumn) {
- isRollupConstituentNodeColumn = rollupConstituentNodeColumn;
- }
-
public boolean isRollupGroupByColumn() {
return isRollupGroupByColumn;
}
- public void setRollupGroupByColumn(boolean rollupGroupByColumn) {
- isRollupGroupByColumn = rollupGroupByColumn;
- }
-
public String getRollupAggregationInputColumn() {
return rollupAggregationInputColumn;
}
- public void setRollupAggregationInputColumn(String rollupAggregationInputColumn) {
- this.rollupAggregationInputColumn = rollupAggregationInputColumn;
- }
}
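ColumnDefinition now derives all of its flags from the Arrow Field's custom metadata, using WebBarrageUtils.keyValuePairs (imported above) to collect the "deephaven:"-prefixed entries into a plain map. The standalone sketch below only illustrates that kind of prefix filtering; the helper name and signature are invented here and are not the actual WebBarrageUtils API.

// Illustrative sketch, not part of this diff: prefix-filtered metadata lookup of the kind the
// ColumnDefinition constructor relies on ("deephaven:isSortable" -> "isSortable").
import java.util.HashMap;
import java.util.Map;

final class ColumnMetadataExample {
    static Map<String, String> filterByPrefix(Map<String, String> raw, String prefix) {
        Map<String, String> result = new HashMap<>();
        for (Map.Entry<String, String> entry : raw.entrySet()) {
            if (entry.getKey().startsWith(prefix)) {
                // Strip the prefix so callers can use short keys such as "type" or "isSortable"
                result.put(entry.getKey().substring(prefix.length()), entry.getValue());
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> raw = new HashMap<>();
        raw.put("deephaven:type", "long");
        raw.put("deephaven:isSortable", "true");
        Map<String, String> fieldMetadata = filterByPrefix(raw, "deephaven:");
        System.out.println(fieldMetadata.get("type") + " sortable=" + fieldMetadata.get("isSortable"));
    }
}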
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/parse/JsDataHandler.java b/web/client-api/src/main/java/io/deephaven/web/client/api/parse/JsDataHandler.java
index d3edf0e03f4..4bb583cd86f 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/parse/JsDataHandler.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/parse/JsDataHandler.java
@@ -3,6 +3,7 @@
//
package io.deephaven.web.client.api.parse;
+import com.google.flatbuffers.FlatBufferBuilder;
import com.google.gwt.i18n.client.TimeZone;
import elemental2.core.ArrayBuffer;
import elemental2.core.Float32Array;
@@ -14,14 +15,6 @@
import elemental2.core.TypedArray;
import elemental2.core.Uint16Array;
import elemental2.core.Uint8Array;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Binary;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.FixedSizeBinary;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.FloatingPoint;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Int;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Precision;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Type;
-import io.deephaven.javascript.proto.dhinternal.arrow.flight.flatbuf.schema_generated.org.apache.arrow.flatbuf.Utf8;
-import io.deephaven.javascript.proto.dhinternal.flatbuffers.Builder;
import io.deephaven.web.client.api.LongWrapper;
import io.deephaven.web.client.api.i18n.JsDateTimeFormat;
import io.deephaven.web.client.api.i18n.JsTimeZone;
@@ -29,6 +22,13 @@
import io.deephaven.web.shared.fu.JsFunction;
import jsinterop.base.Js;
import jsinterop.base.JsArrayLike;
+import org.apache.arrow.flatbuf.Binary;
+import org.apache.arrow.flatbuf.FixedSizeBinary;
+import org.apache.arrow.flatbuf.FloatingPoint;
+import org.apache.arrow.flatbuf.Int;
+import org.apache.arrow.flatbuf.Precision;
+import org.apache.arrow.flatbuf.Type;
+import org.apache.arrow.flatbuf.Utf8;
import org.gwtproject.nio.TypedArrayHelper;
import java.nio.ByteBuffer;
@@ -40,16 +40,16 @@
import java.util.List;
import java.util.Map;
-import static io.deephaven.web.client.api.subscription.QueryConstants.FALSE_BOOLEAN_AS_BYTE;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_BOOLEAN_AS_BYTE;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_BYTE;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_CHAR;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_DOUBLE;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_FLOAT;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_INT;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_LONG;
-import static io.deephaven.web.client.api.subscription.QueryConstants.NULL_SHORT;
-import static io.deephaven.web.client.api.subscription.QueryConstants.TRUE_BOOLEAN_AS_BYTE;
+import static io.deephaven.util.BooleanUtils.FALSE_BOOLEAN_AS_BYTE;
+import static io.deephaven.util.BooleanUtils.NULL_BOOLEAN_AS_BYTE;
+import static io.deephaven.util.BooleanUtils.TRUE_BOOLEAN_AS_BYTE;
+import static io.deephaven.util.QueryConstants.NULL_BYTE;
+import static io.deephaven.util.QueryConstants.NULL_CHAR;
+import static io.deephaven.util.QueryConstants.NULL_DOUBLE;
+import static io.deephaven.util.QueryConstants.NULL_FLOAT;
+import static io.deephaven.util.QueryConstants.NULL_INT;
+import static io.deephaven.util.QueryConstants.NULL_LONG;
+import static io.deephaven.util.QueryConstants.NULL_SHORT;
/**
* Given the expected type of a column, pick one of the enum entries and use that to read the data into arrow buffers.
@@ -101,8 +101,9 @@ public Uint8Array build() {
}
@Override
- public double writeType(Builder builder) {
- return Utf8.createUtf8(builder);
+ public int writeType(FlatBufferBuilder builder) {
+ Utf8.startUtf8(builder);
+ return Utf8.endUtf8(builder);
}
@Override
@@ -201,7 +202,7 @@ private long parseDateString(String str, ParseContext context) {
}
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 64, true);
}
@@ -249,7 +250,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
INTEGER(Type.Int, "int") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 32, true);
}
@@ -261,7 +262,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
SHORT(Type.Int, "short") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 16, true);
}
@@ -273,7 +274,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
LONG(Type.Int, "long") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 64, true);
}
@@ -319,7 +320,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
BYTE(Type.Int, "byte") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 8, true);
}
@@ -331,7 +332,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
CHAR(Type.Int, "char") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 16, false);
}
@@ -343,7 +344,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
FLOAT(Type.FloatingPoint, "float") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return FloatingPoint.createFloatingPoint(builder, Precision.SINGLE);
}
@@ -355,7 +356,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
DOUBLE(Type.FloatingPoint, "double") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return FloatingPoint.createFloatingPoint(builder, Precision.DOUBLE);
}
@@ -368,7 +369,7 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
BOOLEAN(Type.Bool, "boolean", "bool", "java.lang.Boolean") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return Int.createInt(builder, 8, true);
}
@@ -440,25 +441,27 @@ public void write(Object[] data, ParseContext context, JsConsumer addNode,
},
BIG_DECIMAL(Type.Binary, "java.util.BigDecimal") {
@Override
- public double writeType(Builder builder) {
- return Binary.createBinary(builder);
+ public int writeType(FlatBufferBuilder builder) {
+ Binary.startBinary(builder);
+ return Binary.endBinary(builder);
}
},
BIG_INTEGER(Type.Binary, "java.util.BigInteger") {
@Override
- public double writeType(Builder builder) {
- return Binary.createBinary(builder);
+ public int writeType(FlatBufferBuilder builder) {
+ Binary.startBinary(builder);
+ return Binary.endBinary(builder);
}
},
LOCAL_DATE(Type.FixedSizeBinary, "java.time.LocalDate", "localdate") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return FixedSizeBinary.createFixedSizeBinary(builder, 6);
}
},
LOCAL_TIME(Type.FixedSizeBinary, "java.time.LocalTime", "localtime") {
@Override
- public double writeType(Builder builder) {
+ public int writeType(FlatBufferBuilder builder) {
return FixedSizeBinary.createFixedSizeBinary(builder, 7);
}
},
@@ -540,10 +543,10 @@ private static class HandlersHolder {
private static final int SEPARATOR_INDEX = DEFAULT_DATE_TIME_PATTERN.indexOf('T');
- private final int arrowTypeType;
+ private final byte arrowTypeType;
private final String deephavenType;
- JsDataHandler(int arrowTypeType, String... typeNames) {
+ JsDataHandler(byte arrowTypeType, String... typeNames) {
this.arrowTypeType = arrowTypeType;
assert typeNames.length > 0 : "Must have at least one name";
this.deephavenType = typeNames[0];
@@ -553,7 +556,7 @@ private static class HandlersHolder {
}
}
- public int typeType() {
+ public byte typeType() {
return arrowTypeType;
}
@@ -561,7 +564,7 @@ public String deephavenType() {
return deephavenType;
}
- public abstract double writeType(Builder builder);
+ public abstract int writeType(FlatBufferBuilder builder);
     public void write(Object[] data, ParseContext context, JsConsumer<Node> addNode, JsConsumer<Uint8Array> addBuffer) {
throw new UnsupportedOperationException("Can't parse " + name());
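The JsDataHandler changes above swap the dhinternal flatbuffers Builder (whose table offsets were doubles) for com.google.flatbuffers.FlatBufferBuilder, whose offsets are plain ints. Below is a minimal sketch of the two writeType patterns that appear in the diff, Int.createInt for types with fields and a start/end pair for field-less types such as Utf8, using only the flatbuffers and Arrow flatbuf classes already imported above; the class and variable names are invented for illustration.

// Illustrative sketch, not part of this diff: the FlatBufferBuilder patterns used by writeType(...).
import com.google.flatbuffers.FlatBufferBuilder;
import org.apache.arrow.flatbuf.Int;
import org.apache.arrow.flatbuf.Utf8;

final class WriteTypeExample {
    public static void main(String[] args) {
        FlatBufferBuilder builder = new FlatBufferBuilder(64);

        // Types with fields use the generated create helper; offsets are ints now.
        int intTypeOffset = Int.createInt(builder, 32, true);

        // Field-less types such as Utf8 are built with a start/end pair.
        Utf8.startUtf8(builder);
        int utf8TypeOffset = Utf8.endUtf8(builder);

        System.out.println("type offsets: " + intTypeOffset + ", " + utf8TypeOffset);
    }
}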
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/AbstractTableSubscription.java b/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/AbstractTableSubscription.java
new file mode 100644
index 00000000000..155dbb8271a
--- /dev/null
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/AbstractTableSubscription.java
@@ -0,0 +1,524 @@
+//
+// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.web.client.api.subscription;
+
+import com.google.flatbuffers.FlatBufferBuilder;
+import com.vertispan.tsdefs.annotations.TsIgnore;
+import elemental2.core.JsArray;
+import elemental2.dom.CustomEventInit;
+import io.deephaven.barrage.flatbuf.BarrageMessageType;
+import io.deephaven.barrage.flatbuf.BarrageSubscriptionRequest;
+import io.deephaven.extensions.barrage.BarrageSubscriptionOptions;
+import io.deephaven.extensions.barrage.ColumnConversionMode;
+import io.deephaven.javascript.proto.dhinternal.arrow.flight.protocol.flight_pb.FlightData;
+import io.deephaven.web.client.api.Column;
+import io.deephaven.web.client.api.Format;
+import io.deephaven.web.client.api.HasEventHandling;
+import io.deephaven.web.client.api.JsRangeSet;
+import io.deephaven.web.client.api.LongWrapper;
+import io.deephaven.web.client.api.TableData;
+import io.deephaven.web.client.api.WorkerConnection;
+import io.deephaven.web.client.api.barrage.CompressedRangeSetReader;
+import io.deephaven.web.client.api.barrage.WebBarrageMessage;
+import io.deephaven.web.client.api.barrage.WebBarrageStreamReader;
+import io.deephaven.web.client.api.barrage.WebBarrageUtils;
+import io.deephaven.web.client.api.barrage.data.WebBarrageSubscription;
+import io.deephaven.web.client.api.barrage.stream.BiDiStream;
+import io.deephaven.web.client.api.barrage.stream.ResponseStreamWrapper;
+import io.deephaven.web.client.fu.JsSettings;
+import io.deephaven.web.client.state.ClientTableState;
+import io.deephaven.web.shared.data.RangeSet;
+import io.deephaven.web.shared.data.ShiftedRange;
+import io.deephaven.web.shared.fu.JsRunnable;
+import jsinterop.annotations.JsProperty;
+import jsinterop.base.Any;
+import jsinterop.base.Js;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.IOException;
+import java.util.BitSet;
+
+/**
+ * Superclass of various subscription types, allowing specific implementations to customize behavior for their needs.
+ *
+ * Instances are not ready to use right away: we must wait for the provided state to resolve (so that we have the
+ * table schema, know what kind of subscription we will make, and know how each column type will be resolved), and
+ * until the subscription has finished being set up, we will not have received the size of the table. Once closed, an
+ * instance cannot be reused.
+ *
+ * This is also a base class for types exposed to JS.
+ *
+ * This is a rough analog of the JVM's {@code BarrageSubscriptionImpl} class. In contrast to the JVM code, this is
+ * exposed to API consumers rather than being wrapped in a Table type, as it handles the barrage stream and provides
+ * events that client code can listen to.
+ */
+public abstract class AbstractTableSubscription extends HasEventHandling {
+ /**
+ * Indicates that some new data is available on the client, either an initial snapshot or a delta update. The
+ * detail field of the event will contain a TableSubscriptionEventData detailing what has changed, or
+ * allowing access to the entire range of items currently in the subscribed columns.
+ */
+ public static final String EVENT_UPDATED = "updated";
+
+ public enum Status {
+ /** Waiting for some prerequisite before we can use it for the first time. */
+ STARTING,
+ /** Successfully created, not waiting for any messages to be accurate. */
+ ACTIVE,
+        /** A change was requested while active; waiting for it to be confirmed before becoming active again. */
+ PENDING_UPDATE,
+ /** Closed or otherwise stopped, cannot be used again. */
+ DONE;
+ }
+
+ private final ClientTableState state;
+ private final WorkerConnection connection;
+ protected final int rowStyleColumn;
+    private JsArray<Column> columns;
+ private BitSet columnBitSet;
+ protected RangeSet viewportRowSet;
+ private boolean isReverseViewport;
+ private BarrageSubscriptionOptions options;
+
+    private BiDiStream<FlightData, FlightData> doExchange;
+ protected WebBarrageSubscription barrageSubscription;
+
+ protected Status status = Status.STARTING;
+
+ private String failMsg;
+
+ public AbstractTableSubscription(ClientTableState state, WorkerConnection connection) {
+ state.retain(this);
+ this.state = state;
+ this.connection = connection;
+ rowStyleColumn = state.getRowFormatColumn() == null ? TableData.NO_ROW_FORMAT_COLUMN
+ : state.getRowFormatColumn().getIndex();
+
+ revive();
+ }
+
+ /**
+ * Creates the connection to the server. Used on initial connection, and for viewport reconnects.
+ */
+ protected void revive() {
+ // Once the state is running, set up the actual subscription
+ // Don't let subscription be used again, table failed and user will have already gotten an error elsewhere
+ state.onRunning(s -> {
+ if (status != Status.STARTING) {
+ // already closed
+ return;
+ }
+ WebBarrageSubscription.ViewportChangedHandler viewportChangedHandler = this::onViewportChange;
+ WebBarrageSubscription.DataChangedHandler dataChangedHandler = this::onDataChanged;
+
+ status = Status.ACTIVE;
+ this.barrageSubscription =
+ WebBarrageSubscription.subscribe(state, viewportChangedHandler, dataChangedHandler);
+
+ doExchange =
+ connection.streamFactory().create(
+ headers -> connection.flightServiceClient().doExchange(headers),
+ (first, headers) -> connection.browserFlightServiceClient().openDoExchange(first, headers),
+ (next, headers, c) -> connection.browserFlightServiceClient().nextDoExchange(next, headers,
+ c::apply),
+ new FlightData());
+
+ doExchange.onData(this::onFlightData);
+ doExchange.onEnd(this::onStreamEnd);
+
+ sendFirstSubscriptionRequest();
+ },
+ // If the upstream table fails, kill the subscription
+ this::fail,
+                // If the upstream table is closed, it's because this subscription released it, so do nothing
+ JsRunnable.doNothing());
+ }
+
+ public Status getStatus() {
+ return status;
+ }
+
+ protected static FlatBufferBuilder subscriptionRequest(byte[] tableTicket, BitSet columns,
+ @Nullable RangeSet viewport,
+ BarrageSubscriptionOptions options, boolean isReverseViewport) {
+ FlatBufferBuilder sub = new FlatBufferBuilder(1024);
+ int colOffset = BarrageSubscriptionRequest.createColumnsVector(sub, columns.toByteArray());
+ int viewportOffset = 0;
+ if (viewport != null) {
+ viewportOffset =
+ BarrageSubscriptionRequest.createViewportVector(sub, CompressedRangeSetReader.writeRange(viewport));
+ }
+ int optionsOffset = options.appendTo(sub);
+ int tableTicketOffset = BarrageSubscriptionRequest.createTicketVector(sub, tableTicket);
+ BarrageSubscriptionRequest.startBarrageSubscriptionRequest(sub);
+ BarrageSubscriptionRequest.addColumns(sub, colOffset);
+ BarrageSubscriptionRequest.addViewport(sub, viewportOffset);
+ BarrageSubscriptionRequest.addSubscriptionOptions(sub, optionsOffset);
+ BarrageSubscriptionRequest.addTicket(sub, tableTicketOffset);
+ BarrageSubscriptionRequest.addReverseViewport(sub, isReverseViewport);
+ sub.finish(BarrageSubscriptionRequest.endBarrageSubscriptionRequest(sub));
+
+ return sub;
+ }
+
+ protected abstract void sendFirstSubscriptionRequest();
+
+    protected void sendBarrageSubscriptionRequest(RangeSet viewport, JsArray<Column> columns, Double updateIntervalMs,
+ boolean isReverseViewport) {
+ if (status == Status.DONE) {
+ if (failMsg == null) {
+ throw new IllegalStateException("Can't change subscription, already closed");
+ } else {
+ throw new IllegalStateException("Can't change subscription, already failed: " + failMsg);
+ }
+ }
+ status = Status.PENDING_UPDATE;
+ this.columns = columns;
+ this.viewportRowSet = viewport;
+ this.columnBitSet = makeColumnBitset(columns);
+ this.isReverseViewport = isReverseViewport;
+ this.options = BarrageSubscriptionOptions.builder()
+ .batchSize(WebBarrageSubscription.BATCH_SIZE)
+ .maxMessageSize(WebBarrageSubscription.MAX_MESSAGE_SIZE)
+ .columnConversionMode(ColumnConversionMode.Stringify)
+ .minUpdateIntervalMs(updateIntervalMs == null ? 0 : (int) (double) updateIntervalMs)
+                .columnsAsList(false) // TODO(deephaven-core#5927) flip this to true
+ .useDeephavenNulls(true)
+ .build();
+ FlatBufferBuilder request = subscriptionRequest(
+ Js.uncheckedCast(state.getHandle().getTicket()),
+ columnBitSet,
+ viewport,
+ options,
+ isReverseViewport);
+ FlightData subscriptionRequest = new FlightData();
+ subscriptionRequest
+ .setAppMetadata(WebBarrageUtils.wrapMessage(request, BarrageMessageType.BarrageSubscriptionRequest));
+ doExchange.send(subscriptionRequest);
+ }
+
+    protected BitSet makeColumnBitset(JsArray<Column> columns) {
+ return state.makeBitset(Js.uncheckedCast(columns));
+ }
+
+ public ClientTableState state() {
+ return state;
+ }
+
+ protected WorkerConnection connection() {
+ return connection;
+ }
+
+ protected boolean isSubscriptionReady() {
+ return status == Status.ACTIVE;
+ }
+
+ public double size() {
+ if (status == Status.ACTIVE) {
+ return barrageSubscription.getCurrentRowSet().size();
+ }
+ if (status == Status.DONE) {
+ throw new IllegalStateException("Can't read size when already closed");
+ }
+ return state.getSize();
+ }
+
+ private void onDataChanged(RangeSet rowsAdded, RangeSet rowsRemoved, RangeSet totalMods, ShiftedRange[] shifted,
+ BitSet modifiedColumnSet) {
+ if (!isSubscriptionReady()) {
+ return;
+ }
+
+ notifyUpdate(rowsAdded, rowsRemoved, totalMods, shifted);
+ }
+
+ protected void notifyUpdate(RangeSet rowsAdded, RangeSet rowsRemoved, RangeSet totalMods, ShiftedRange[] shifted) {
+        // TODO (deephaven-core#2435) Rewrite shifts as adds/removes/modifies
+ UpdateEventData detail = new SubscriptionEventData(
+ barrageSubscription,
+ rowStyleColumn,
+ columns,
+ rowsAdded,
+ rowsRemoved,
+ totalMods,
+ shifted);
+        CustomEventInit<UpdateEventData> event = CustomEventInit.create();
+ event.setDetail(detail);
+ fireEvent(TableSubscription.EVENT_UPDATED, event);
+ }
+
+ public static class SubscriptionRow implements TableData.Row {
+ private final WebBarrageSubscription subscription;
+ private final int rowStyleColumn;
+ protected final long index;
+ public LongWrapper indexCached;
+
+ public SubscriptionRow(WebBarrageSubscription subscription, int rowStyleColumn, long index) {
+ this.subscription = subscription;
+ this.rowStyleColumn = rowStyleColumn;
+ this.index = index;
+ }
+
+ @Override
+ public LongWrapper getIndex() {
+ if (indexCached == null) {
+ indexCached = LongWrapper.of(index);
+ }
+ return indexCached;
+ }
+
+ @Override
+ public Any get(Column column) {
+ return subscription.getData(index, column.getIndex());
+ }
+
+ @Override
+ public Format getFormat(Column column) {
+ long cellColors = 0;
+ long rowColors = 0;
+ String numberFormat = null;
+ String formatString = null;
+ if (column.getStyleColumnIndex() != null) {
+ LongWrapper wrapper = subscription.getData(index, column.getStyleColumnIndex()).uncheckedCast();
+ cellColors = wrapper == null ? 0 : wrapper.getWrapped();
+ }
+ if (rowStyleColumn != TableData.NO_ROW_FORMAT_COLUMN) {
+ LongWrapper wrapper = subscription.getData(index, rowStyleColumn).uncheckedCast();
+ rowColors = wrapper == null ? 0 : wrapper.getWrapped();
+ }
+ if (column.getFormatStringColumnIndex() != null) {
+ numberFormat = subscription.getData(index, column.getFormatStringColumnIndex()).uncheckedCast();
+ }
+ if (column.getFormatStringColumnIndex() != null) {
+ formatString = subscription.getData(index, column.getFormatStringColumnIndex()).uncheckedCast();
+ }
+ return new Format(cellColors, rowColors, numberFormat, formatString);
+ }
+ }
+
+ /**
+ * TableData type for both viewports and full table subscriptions.
+ */
+ @TsIgnore
+ public static class SubscriptionEventData extends UpdateEventData implements ViewportData, SubscriptionTableData {
+        public SubscriptionEventData(WebBarrageSubscription subscription, int rowStyleColumn, JsArray<Column> columns,
+ RangeSet added, RangeSet removed, RangeSet modified, ShiftedRange[] shifted) {
+ super(subscription, rowStyleColumn, columns, added, removed, modified, shifted);
+ }
+
+ @Override
+ public JsRangeSet getAdded() {
+ return added;
+ }
+
+ @Override
+ public JsRangeSet getRemoved() {
+ return removed;
+ }
+
+ @Override
+ public JsRangeSet getModified() {
+ return modified;
+ }
+
+ @Override
+ public JsRangeSet getFullIndex() {
+ return fullRowSet;
+ }
+ }
+
+ /**
+ * Base type to allow trees to extend from here separately from tables.
+ */
+ @TsIgnore
+ public abstract static class UpdateEventData implements TableData {
+ protected final WebBarrageSubscription subscription;
+ private final int rowStyleColumn;
+        private final JsArray<Column> columns;
+ protected final JsRangeSet added;
+ protected final JsRangeSet removed;
+ protected final JsRangeSet modified;
+ protected final JsRangeSet fullRowSet;
+
+ // cached copy in case it was requested, could be requested again
+        private JsArray<SubscriptionRow> allRows;
+
+ private double offset;
+
+        public UpdateEventData(WebBarrageSubscription subscription, int rowStyleColumn, JsArray<Column> columns,
+ RangeSet added, RangeSet removed, RangeSet modified, ShiftedRange[] shifted) {
+ this.subscription = subscription;
+ this.rowStyleColumn = rowStyleColumn;
+ this.columns = columns;
+ this.added = new JsRangeSet(added);
+ this.removed = new JsRangeSet(removed);
+ this.modified = new JsRangeSet(modified);
+ this.fullRowSet = new JsRangeSet(transformRowsetForConsumer(subscription.getCurrentRowSet(),
+ subscription.getServerViewport(), subscription.isReversed()));
+ }
+
+ // for ViewportData
+ @JsProperty
+ public Double getOffset() {
+ return offset;
+ }
+
+ public void setOffset(double offset) {
+ this.offset = offset;
+ }
+
+ @Override
+ public JsArray getRows() {
+ if (allRows == null) {
+ allRows = new JsArray<>();
+ fullRowSet.getRange().indexIterator().forEachRemaining((long index) -> {
+ allRows.push(makeRow(index));
+ });
+ if (JsSettings.isDevMode()) {
+ assert allRows.length == fullRowSet.getSize();
+ }
+ }
+ return (JsArray) (JsArray) allRows;
+ }
+
+ protected SubscriptionRow makeRow(long index) {
+ return new SubscriptionRow(subscription, rowStyleColumn, index);
+ }
+
+ @Override
+ public Row get(int index) {
+ return this.get((long) index);
+ }
+
+ @Override
+ public Row get(long index) {
+ return makeRow(index);
+ }
+
+ @Override
+ public Any getData(int index, Column column) {
+ return getData((long) index, column);
+ }
+
+ @Override
+ public Any getData(long key, Column column) {
+ return subscription.getData(fullRowSet.getRange().get(key), column.getIndex());
+ }
+
+ @Override
+ public Format getFormat(int index, Column column) {
+ return getFormat((long) index, column);
+ }
+
+ @Override
+ public Format getFormat(long index, Column column) {
+ long key = fullRowSet.getRange().get(index);
+ long cellColors = 0;
+ long rowColors = 0;
+ String numberFormat = null;
+ String formatString = null;
+ if (column.getStyleColumnIndex() != null) {
+ LongWrapper wrapper = subscription.getData(key, column.getStyleColumnIndex()).uncheckedCast();
+ cellColors = wrapper == null ? 0 : wrapper.getWrapped();
+ }
+ if (rowStyleColumn != NO_ROW_FORMAT_COLUMN) {
+                LongWrapper wrapper = subscription.getData(key, rowStyleColumn).uncheckedCast();
+ rowColors = wrapper == null ? 0 : wrapper.getWrapped();
+ }
+ if (column.getFormatStringColumnIndex() != null) {
+ numberFormat = subscription.getData(key, column.getFormatStringColumnIndex()).uncheckedCast();
+ }
+ if (column.getFormatStringColumnIndex() != null) {
+ formatString = subscription.getData(key, column.getFormatStringColumnIndex()).uncheckedCast();
+ }
+ return new Format(cellColors, rowColors, numberFormat, formatString);
+ }
+
+ @Override
+        public JsArray<Column> getColumns() {
+ return columns;
+ }
+ }
+
+ /**
+ * If a viewport is in use, transforms the given rowset to position space based on that viewport.
+ *
+     * @param rowSet the rowset to possibly transform
+     * @param viewport the server's viewport, if any
+     * @param reversed whether the viewport is reversed
+     * @return a transformed rowset
+ */
+ private static RangeSet transformRowsetForConsumer(RangeSet rowSet, @Nullable RangeSet viewport, boolean reversed) {
+ if (viewport != null) {
+ return rowSet.subsetForPositions(viewport, reversed);
+ }
+ return rowSet;
+ }
+
+ private void onViewportChange(RangeSet serverViewport, BitSet serverColumns, boolean serverReverseViewport) {
+ boolean subscriptionReady = ((serverColumns == null && columnBitSet == null)
+ || (serverColumns == null && columnBitSet.cardinality() == state.getTableDef().getColumns().length)
+ || (serverColumns != null && serverColumns.equals(this.columnBitSet)))
+ && (serverViewport == null && this.viewportRowSet == null
+ || (serverViewport != null && serverViewport.equals(this.viewportRowSet)))
+ && serverReverseViewport == isReverseViewport;
+ if (subscriptionReady) {
+ status = Status.ACTIVE;
+ }
+ }
+
+ private final WebBarrageStreamReader reader = new WebBarrageStreamReader();
+
+ private void onFlightData(FlightData data) {
+ WebBarrageMessage message;
+ try {
+ message = reader.parseFrom(options, state.chunkTypes(), state.columnTypes(), state.componentTypes(), data);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ if (message != null) {
+ // This payload resulted in an update to the table's contents, inform the subscription
+ barrageSubscription.applyUpdates(message);
+ }
+ }
+
+ protected void onStreamEnd(ResponseStreamWrapper.Status status) {
+ if (this.status == Status.DONE) {
+ return;
+ }
+ if (status.isTransportError()) {
+ // If the subscription isn't closed and we hit a transport error, allow it to restart
+ this.status = Status.STARTING;
+ } else {
+ // Subscription failed somehow, fire an event
+ fail(status.getDetails());
+ }
+ }
+
+ private void fail(String message) {
+ failureHandled(message);
+ this.status = Status.DONE;
+ doExchange = null;
+ failMsg = message;
+ }
+
+ /**
+ * The columns that were subscribed to when this subscription was created
+ *
+ * @return {@link Column}
+ */
+    public JsArray<Column> getColumns() {
+ return columns;
+ }
+
+ /**
+ * Stops the subscription on the server.
+ */
+ public void close() {
+ state.unretain(this);
+ if (doExchange != null) {
+ doExchange.end();
+ doExchange.cancel();
+ }
+ status = Status.DONE;
+ }
+}
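As a rough usage illustration of the lifecycle described in the class Javadoc, the hypothetical subclass below supplies the one abstract hook, sendFirstSubscriptionRequest(), and asks for the full table by passing a null viewport; callers would then listen for EVENT_UPDATED to read the resulting SubscriptionEventData. The class name is invented, everything else comes from the API added in this file, and the concrete subscription types in this PR may wire things up differently. A real implementation would also need to account for the state callback registered in the superclass constructor possibly firing before subclass fields are assigned.

// Illustrative sketch, not part of this diff: a minimal AbstractTableSubscription subclass.
package io.deephaven.web.client.api.subscription;

import elemental2.core.JsArray;
import io.deephaven.web.client.api.Column;
import io.deephaven.web.client.api.WorkerConnection;
import io.deephaven.web.client.state.ClientTableState;

public class ExampleFullTableSubscription extends AbstractTableSubscription {
    private final JsArray<Column> subscribedColumns;

    public ExampleFullTableSubscription(ClientTableState state, WorkerConnection connection,
            JsArray<Column> columns) {
        super(state, connection);
        this.subscribedColumns = columns;
    }

    @Override
    protected void sendFirstSubscriptionRequest() {
        // A null viewport subscribes to every row; a null interval falls back to the builder default above.
        sendBarrageSubscriptionRequest(null, subscribedColumns, null, false);
    }
}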
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/QueryConstants.java b/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/QueryConstants.java
deleted file mode 100644
index e49eb7c2846..00000000000
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/QueryConstants.java
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright (c) 2016-2024 Deephaven Data Labs and Patent Pending
-//
-package io.deephaven.web.client.api.subscription;
-
-
-/**
- * Constants for null values within the Deephaven engine From io.deephaven.util.QueryConstants
- */
-public interface QueryConstants {
- char NULL_CHAR = Character.MAX_VALUE;
- byte NULL_BYTE = Byte.MIN_VALUE;
- short NULL_SHORT = Short.MIN_VALUE;
- int NULL_INT = Integer.MIN_VALUE;
- long NULL_LONG = Long.MIN_VALUE;
- float NULL_FLOAT = -Float.MAX_VALUE;
- double NULL_DOUBLE = -Double.MAX_VALUE;
- byte NULL_BOOLEAN_AS_BYTE = NULL_BYTE;
- byte TRUE_BOOLEAN_AS_BYTE = (byte) 1;
- byte FALSE_BOOLEAN_AS_BYTE = (byte) 0;
-}
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/SubscriptionTableData.java b/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/SubscriptionTableData.java
index 7c08d330e91..2067e9068db 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/SubscriptionTableData.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/api/subscription/SubscriptionTableData.java
@@ -5,672 +5,53 @@
import com.vertispan.tsdefs.annotations.TsInterface;
import com.vertispan.tsdefs.annotations.TsName;
-import elemental2.core.JsArray;
-import elemental2.dom.CustomEventInit;
-import io.deephaven.web.client.api.*;
-import io.deephaven.web.client.fu.JsSettings;
-import io.deephaven.web.shared.data.*;
-import io.deephaven.web.shared.data.columns.ColumnData;
-import jsinterop.annotations.JsFunction;
-import jsinterop.annotations.JsIgnore;
-import jsinterop.annotations.JsMethod;
+import io.deephaven.web.client.api.JsRangeSet;
+import io.deephaven.web.client.api.TableData;
import jsinterop.annotations.JsProperty;
-import jsinterop.annotations.JsType;
-import jsinterop.base.Any;
-import jsinterop.base.Js;
-import jsinterop.base.JsArrayLike;
-import javax.annotation.Nullable;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.NavigableSet;
-import java.util.PrimitiveIterator;
-import java.util.TreeMap;
-import static io.deephaven.web.client.api.subscription.ViewportData.NO_ROW_FORMAT_COLUMN;
-
-public class SubscriptionTableData {
- @JsFunction
- private interface ArrayCopy {
- @SuppressWarnings("unusable-by-js")
- void copyTo(Object destArray, long destPos, Object srcArray, int srcPos);
- }
-
- private final JsArray columns;
- private final int rowStyleColumn;
- private final HasEventHandling evented;
-
- // the actual rows present on the client, in their correct order
- private RangeSet index;
-
- // mappings from the index to the position of a row in the data array
- private TreeMap redirectedIndexes;
-
- // rows in the data columns that no longer contain data and can be reused
- private RangeSet reusableDestinations;
-
- // array of data columns, cast each to a jsarray to read rows
- private Object[] data;
-
- public SubscriptionTableData(JsArray columns, int rowStyleColumn, HasEventHandling evented) {
- this.columns = columns;
- this.rowStyleColumn = rowStyleColumn;
- this.evented = evented;
- }
-
- // TODO support this being called multiple times so we can keep viewports going without clearing the data
- public TableData handleSnapshot(TableSnapshot snapshot) {
- // when changing snapshots we should actually rewrite the columns, possibly emulate ViewportData more?
- ColumnData[] dataColumns = snapshot.getDataColumns();
- data = new Object[dataColumns.length];
- reusableDestinations = RangeSet.empty();
- redirectedIndexes = new TreeMap<>();
- index = snapshot.getIncludedRows();
-
- long includedRowCount = snapshot.getIncludedRows().size();
- RangeSet destination = freeRows(includedRowCount);
- boolean indexUpdated = false;
-
- for (int index = 0; index < dataColumns.length; index++) {
- ColumnData dataColumn = dataColumns[index];
- if (dataColumn == null) {
- // no data in this column, wasn't requested
- continue;
- }
-
- final int i = index;
- Column column = columns.find((c, i1, i2) -> c.getIndex() == i);
-
- ArrayCopy arrayCopy = arrayCopyFuncForColumn(column);
-
- Object[] localCopy = new Object[(int) includedRowCount];
- data[index] = localCopy;
- PrimitiveIterator.OfLong destIter = destination.indexIterator();
- PrimitiveIterator.OfLong indexIter = snapshot.getIncludedRows().indexIterator();
- int j = 0;
- while (indexIter.hasNext()) {
- assert destIter.hasNext();
- long dest = destIter.nextLong();
- long nextIndex = indexIter.nextLong();
- if (!indexUpdated) {
- redirectedIndexes.put(nextIndex, dest);
- }
- arrayCopy.copyTo(localCopy, dest, dataColumn.getData(), j++);
- }
- assert !destIter.hasNext();
- indexUpdated = true;
- }
-
- return notifyUpdates(index, RangeSet.empty(), RangeSet.empty());
- }
+/**
+ * Event data, describing the indexes that were added/removed/updated, and providing access to Rows (and thus data in
+ * columns) either by index, or scanning the complete present index.
+ *
+ * This class supports two ways of reading the table - checking the changes made since the last update, and reading all
+ * data currently in the table. While it is more expensive to always iterate over every single row in the table, it may
+ * in some cases actually be cheaper than maintaining state separately and updating only the changes, though both
+ * options should be considered.
+ *
+ * The RangeSet objects allow iterating over the LongWrapper indexes in the table. Note that these "indexes" are not
+ * necessarily contiguous and may be negative, and represent some internal state on the server, allowing it to keep
+ * track of data efficiently. Those LongWrapper objects can be passed to the various methods on this instance to read
+ * specific rows or cells out of the table.
+ */
+@TsInterface
+@TsName(name = "SubscriptionTableData", namespace = "dh")
+public interface SubscriptionTableData extends TableData {
+
+
+ @JsProperty
+ JsRangeSet getFullIndex();
/**
- * Helper to avoid appending many times when modifying indexes. The append() method should be called for each key
- * _in order_ to ensure that RangeSet.addRange isn't called excessively. When no more items will be added, flush()
- * must be called.
+ * The ordered set of row indexes added since the last update.
+ *
+ * @return the rangeset of rows added
*/
- private static class RangeSetAppendHelper {
- private final RangeSet rangeSet;
-
- private long currentFirst = -1;
- private long currentLast;
-
- public RangeSetAppendHelper(final RangeSet rangeSet) {
- this.rangeSet = rangeSet;
- }
-
- public void append(long key) {
- assert key >= 0;
-
- if (currentFirst == -1) {
- // first key to be added, move both first and last
- currentFirst = key;
- currentLast = key;
-
- return;
- }
-
- if (key == currentLast + 1) {
- // key appends to our current range
- currentLast = key;
- } else if (key == currentFirst - 1) {
- // key appends to our current range
- currentFirst = key;
- } else {
- // existing range doesn't match the new item, finish the old range and start a new one
- rangeSet.addRange(new Range(currentFirst, currentLast));
- currentFirst = key;
- currentLast = key;
- }
- }
-
- public void flush() {
- if (currentFirst != -1) {
- rangeSet.addRange(new Range(currentFirst, currentLast));
- currentFirst = -1;
- }
- }
- }
-
- public TableData handleDelta(DeltaUpdates delta) {
- // delete old data, track slots freed up. we do this by row since they might be non-contiguous or out of order
- RangeSetAppendHelper reusableHelper = new RangeSetAppendHelper(reusableDestinations);
- delta.getRemoved().indexIterator().forEachRemaining((long index) -> {
- long dest = redirectedIndexes.remove(index);
- reusableHelper.append(dest);
- // TODO consider trimming the columns down too, and truncating the reusable slots at the end
- });
- reusableHelper.flush();
- // clean up index by ranges, not by row
- delta.getRemoved().rangeIterator().forEachRemaining(index::removeRange);
-
- // Shift moved rows in the redir index
- boolean hasReverseShift = false;
- final ShiftedRange[] shiftedRanges = delta.getShiftedRanges();
- RangeSetAppendHelper shifter = new RangeSetAppendHelper(index);
- for (int i = shiftedRanges.length - 1; i >= 0; --i) {
- final ShiftedRange shiftedRange = shiftedRanges[i];
- final long offset = shiftedRange.getDelta();
- if (offset < 0) {
- hasReverseShift = true;
- continue;
- }
- index.removeRange(shiftedRange.getRange());
- final NavigableSet toMove = redirectedIndexes.navigableKeySet()
- .subSet(shiftedRange.getRange().getFirst(), true, shiftedRange.getRange().getLast(), true);
- // iterate backward and move them forward
- for (Long key : toMove.descendingSet()) {
- long shiftedKey = key + offset;
- Long oldValue = redirectedIndexes.put(shiftedKey, redirectedIndexes.remove(key));
- assert oldValue == null : shiftedKey + " already has a value, " + oldValue;
- shifter.append(shiftedKey);
- }
- }
- if (hasReverseShift) {
- for (int i = 0; i < shiftedRanges.length; ++i) {
- final ShiftedRange shiftedRange = shiftedRanges[i];
- final long offset = shiftedRange.getDelta();
- if (offset > 0) {
- continue;
- }
- index.removeRange(shiftedRange.getRange());
- final NavigableSet toMove = redirectedIndexes.navigableKeySet()
- .subSet(shiftedRange.getRange().getFirst(), true, shiftedRange.getRange().getLast(), true);
- // iterate forward and move them backward
- for (Long key : toMove) {
- long shiftedKey = key + offset;
- Long oldValue = redirectedIndexes.put(shiftedKey, redirectedIndexes.remove(key));
- assert oldValue == null : shiftedKey + " already has a value, " + oldValue;
- shifter.append(shiftedKey);
- }
- }
- }
- shifter.flush();
-
- // Find space for the rows we're about to add. We must not adjust the index until this is done, it is used
- // to see where the end of the data is
- RangeSet addedDestination = freeRows(delta.getAdded().size());
- // Within each column, append additions
- DeltaUpdates.ColumnAdditions[] additions = delta.getSerializedAdditions();
- for (int i = 0; i < additions.length; i++) {
- DeltaUpdates.ColumnAdditions addedColumn = delta.getSerializedAdditions()[i];
- Column column = columns.find((c, i1, i2) -> c.getIndex() == addedColumn.getColumnIndex());
-
- ArrayCopy arrayCopy = arrayCopyFuncForColumn(column);
-
- PrimitiveIterator.OfLong addedIndexes = delta.getAdded().indexIterator();
- PrimitiveIterator.OfLong destIter = addedDestination.indexIterator();
- int j = 0;
- while (addedIndexes.hasNext()) {
- long origIndex = addedIndexes.nextLong();
- assert delta.getIncludedAdditions().contains(origIndex);
- assert destIter.hasNext();
- long dest = destIter.nextLong();
- Long old = redirectedIndexes.put(origIndex, dest);
- assert old == null || old == dest;
- arrayCopy.copyTo(data[addedColumn.getColumnIndex()], dest, addedColumn.getValues().getData(), j++);
- }
- }
-
- // Update the index to reflect the added items
- delta.getAdded().rangeIterator().forEachRemaining(index::addRange);
-
- // Within each column, apply modifications
- DeltaUpdates.ColumnModifications[] modifications = delta.getSerializedModifications();
- RangeSet allModified = new RangeSet();
- for (int i = 0; i < modifications.length; ++i) {
- final DeltaUpdates.ColumnModifications modifiedColumn = modifications[i];
- if (modifiedColumn == null) {
- continue;
- }
-
- modifiedColumn.getRowsIncluded().rangeIterator().forEachRemaining(allModified::addRange);
- Column column = columns.find((c, i1, i2) -> c.getIndex() == modifiedColumn.getColumnIndex());
-
- ArrayCopy arrayCopy = arrayCopyFuncForColumn(column);
-
- PrimitiveIterator.OfLong modifiedIndexes = modifiedColumn.getRowsIncluded().indexIterator();
- int j = 0;
- while (modifiedIndexes.hasNext()) {
- long origIndex = modifiedIndexes.nextLong();
- arrayCopy.copyTo(data[modifiedColumn.getColumnIndex()], redirectedIndexes.get(origIndex),
- modifiedColumn.getValues().getData(), j++);
- }
- }
-
- // Check that the index sizes make sense
- assert redirectedIndexes.size() == index.size();
- // Note that we can't do this assert, since we don't truncate arrays, we just leave nulls at the end
- // assert Js.asArrayLike(data[0]).getLength() == redirectedIndexes.size();
-
- return notifyUpdates(delta.getAdded(), delta.getRemoved(), allModified);
- }
-
- private TableData notifyUpdates(RangeSet added, RangeSet removed, RangeSet modified) {
- UpdateEventData detail = new UpdateEventData(added, removed, modified);
- if (evented != null) {
- CustomEventInit event = CustomEventInit.create();
- event.setDetail(detail);
- evented.fireEvent(TableSubscription.EVENT_UPDATED, event);
- }
- return detail;
- }
-
- private ArrayCopy arrayCopyFuncForColumn(@Nullable Column column) {
- final String type = column != null ? column.getType() : "";
- switch (type) {
- case "long":
- return (destArray, destPos, srcArray, srcPos) -> {
- final long value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asLong();
- if (value == QueryConstants.NULL_LONG) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, LongWrapper.of(value));
- }
- };
- case "java.time.Instant":
- case "java.time.ZonedDateTime":
- return (destArray, destPos, srcArray, srcPos) -> {
- long value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asLong();
- if (value == QueryConstants.NULL_LONG) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, new DateWrapper(value));
- }
- };
- case "java.math.BigDecimal":
- return (destArray, destPos, srcArray, srcPos) -> {
- final BigDecimal value = Js.cast(Js.asArrayLike(srcArray).getAt(srcPos));
- if (value == null) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, new BigDecimalWrapper(value));
- }
- };
- case "java.math.BigInteger":
- return (destArray, destPos, srcArray, srcPos) -> {
- final BigInteger value = Js.cast(Js.asArrayLike(srcArray).getAt(srcPos));
- if (value == null) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, new BigIntegerWrapper(value));
- }
- };
- case "java.time.LocalDate":
- return (destArray, destPos, srcArray, srcPos) -> {
- final LocalDate value = Js.cast(Js.asArrayLike(srcArray).getAt(srcPos));
- if (value == null) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, new LocalDateWrapper(value));
- }
- };
- case "java.time.LocalTime":
- return (destArray, destPos, srcArray, srcPos) -> {
- final LocalTime value = Js.cast(Js.asArrayLike(srcArray).getAt(srcPos));
- if (value == null) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, new LocalTimeWrapper(value));
- }
- };
- case "java.lang.Boolean":
- return (destArray, destPos, srcArray, srcPos) -> {
- final Any value = Js.asArrayLike(srcArray).getAtAsAny(srcPos);
-
- if (value == null) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else if (value.asBoolean()) {
- Js.asArrayLike(destArray).setAt((int) destPos, true);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, false);
- }
- };
- case "int":
- return (destArray, destPos, srcArray, srcPos) -> {
- final int value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asInt();
- if (value == QueryConstants.NULL_INT) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, value);
- }
- };
- case "byte":
- return (destArray, destPos, srcArray, srcPos) -> {
- final byte value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asByte();
- if (value == QueryConstants.NULL_BYTE) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, value);
- }
- };
- case "short":
- return (destArray, destPos, srcArray, srcPos) -> {
- final short value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asShort();
- if (value == QueryConstants.NULL_SHORT) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, value);
- }
- };
- case "double":
- return (destArray, destPos, srcArray, srcPos) -> {
- final double value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asDouble();
- if (value == QueryConstants.NULL_DOUBLE) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, value);
- }
- };
- case "float":
- return (destArray, destPos, srcArray, srcPos) -> {
- final float value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asFloat();
- if (value == QueryConstants.NULL_FLOAT) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, value);
- }
- };
- case "char":
- return (destArray, destPos, srcArray, srcPos) -> {
- final char value = Js.asArrayLike(srcArray).getAtAsAny(srcPos).asChar();
- if (value == QueryConstants.NULL_CHAR) {
- Js.asArrayLike(destArray).setAt((int) destPos, null);
- } else {
- Js.asArrayLike(destArray).setAt((int) destPos, value);
- }
- };
- default:
- // exit so we can handle null also in the method's final return
- }
- return (destArray, destPos, srcArray, srcPos) -> {
- // boring column or format data, just copy it
- Js.asArrayLike(destArray).setAt((int) destPos, Js.asArrayLike(srcArray).getAt(srcPos));
- };
- }
-
- private RangeSet freeRows(long required) {
- if (required == 0) {
- return RangeSet.empty();
- }
- long existingSlotsToReuse = reusableDestinations.size();
- if (existingSlotsToReuse > required) {
- // only take some of the ranges from the reusable list
- RangeSet reused = RangeSet.empty();
- long taken = 0;
- RangeSet stillUnused = RangeSet.empty();
- // TODO this could be more efficient, iterating entire ranges until we only need a partial range
- PrimitiveIterator.OfLong iterator = reusableDestinations.indexIterator();
- while (taken < required) {
- assert iterator.hasNext();
- long value = iterator.nextLong();
- reused.addRange(new Range(value, value));
- taken++;
- }
- assert taken == required;
- while (iterator.hasNext()) {
- long value = iterator.nextLong();
- stillUnused.addRange(new Range(value, value));
- }
- reusableDestinations = stillUnused;
- assert required == reused.size();
- return reused;
- }
- // take all ranges from the reusable list (plus make more if needed)
- RangeSet created = reusableDestinations;
- if (existingSlotsToReuse < required) {
- long nextIndex;
- if (created.isEmpty()) {
- if (index.isEmpty()) {
- nextIndex = 0;
- } else {
- nextIndex = redirectedIndexes.size();
- }
- } else if (index.isEmpty()) {
- nextIndex = created.getLastRow() + 1;
- } else {
- nextIndex = Math.max(created.getLastRow(), index.getLastRow()) + 1;
- }
- created.addRange(new Range(nextIndex, nextIndex + required - existingSlotsToReuse - 1));
- }
-
- reusableDestinations = RangeSet.empty();
- assert required == created.size();
- return created;
- }
-
- @TsInterface
- @TsName(namespace = "dh")
- public class SubscriptionRow implements TableData.Row {
- private final long index;
- public LongWrapper indexCached;
-
- public SubscriptionRow(long index) {
- this.index = index;
- }
-
- @Override
- public LongWrapper getIndex() {
- if (indexCached == null) {
- indexCached = LongWrapper.of(index);
- }
- return indexCached;
- }
-
- @Override
- public Any get(Column column) {
- int redirectedIndex = (int) (long) redirectedIndexes.get(this.index);
- JsArrayLike