# api2.py
import copy
import datetime
import io
import json
from functools import wraps
from typing import List
import cloudvolume
import nglui
import numpy as np
import pandas as pd
import pytz
import werkzeug
from cachetools import LRUCache, TTLCache, cached
from cachetools.keys import hashkey
from dynamicannotationdb.models import AnalysisTable, AnalysisVersion
from emannotationschemas.schemas.base import PostGISField, SegmentationField
from flask import Response, abort, current_app, g, request
from flask_accepts import accepts
from flask_restx import Namespace, Resource, inputs, reqparse
from geoalchemy2.types import Geometry
from marshmallow import fields as mm_fields
from middle_auth_client import (
auth_requires_permission,
)
from neuroglancer import viewer_state
from sqlalchemy.sql.sqltypes import Boolean, DateTime, Float, Integer, Numeric, String
from materializationengine.blueprints.client.common import (
generate_complex_query_dataframe,
generate_simple_query_dataframe,
get_analysis_version_and_table,
get_analysis_version_and_tables,
handle_complex_query,
handle_simple_query,
sql_query_warning,
validate_table_args,
)
from materializationengine.blueprints.client.cache import get_cached_view_metadata
from materializationengine.blueprints.client.common import (
unhandled_exception as common_unhandled_exception,
)
from materializationengine.blueprints.client.datastack import validate_datastack
from materializationengine.blueprints.client.new_query import (
remap_query,
strip_root_id_filters,
update_rootids,
)
from materializationengine.blueprints.client.precomputed import AnnotationWriter
from materializationengine.blueprints.client.query_manager import QueryManager
from materializationengine.blueprints.client.schemas import (
AnalysisViewSchema,
ComplexQuerySchema,
SimpleQuerySchema,
V2QuerySchema,
)
from materializationengine.blueprints.client.utils import (
after_request,
collect_crud_columns,
create_query_response,
get_latest_version,
update_notice_text_warnings,
)
from materializationengine.blueprints.reset_auth import reset_auth
from materializationengine.chunkedgraph_gateway import chunkedgraph_cache
from materializationengine.database import db_manager, dynamic_annotation_cache
from materializationengine.info_client import get_aligned_volumes, get_datastack_info
from materializationengine.limiter import limit_by_category
from materializationengine.models import MaterializedMetadata
from materializationengine.request_db import request_db_session
from materializationengine.schemas import AnalysisTableSchema, AnalysisVersionSchema
from materializationengine.utils import check_read_permission
__version__ = "5.13.5"
authorizations = {
"apikey": {"type": "apiKey", "in": "query", "name": "middle_auth_token"}
}
client_bp = Namespace(
"Materialization Client2",
authorizations=authorizations,
description="Materialization Client",
)
@client_bp.errorhandler(werkzeug.exceptions.BadRequest)
def bad_request_exception(e):
raise e
@client_bp.errorhandler(Exception)
def unhandled_exception(e):
return common_unhandled_exception(e)
annotation_parser = reqparse.RequestParser()
annotation_parser.add_argument(
"annotation_ids", type=int, action="split", help="list of annotation ids"
)
annotation_parser.add_argument(
"pcg_table_name", type=str, help="name of pcg segmentation table"
)
def _get_float(value):
try:
return float(value)
except (TypeError, ValueError):
raise ValueError(f"{value} is not a valid float")
class float_range(object):
"""Restrict input to an float in a range (inclusive)"""
def __init__(self, low, high, argument="argument"):
self.low = low
self.high = high
self.argument = argument
def __call__(self, value):
value = _get_float(value)
if value < self.low or value > self.high:
msg = "Invalid {arg}: {val}. {arg} must be within the range {lo} - {hi}"
raise ValueError(
msg.format(arg=self.argument, val=value, lo=self.low, hi=self.high)
)
return value
@property
def __schema__(self):
return {
"type": "integer",
"minimum": self.low,
"maximum": self.high,
}
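# Usage sketch (the parser and argument names below are hypothetical and not
# used elsewhere in this module): float_range is intended as a reqparse
# "type" callable for validating bounded float query arguments:
#
#     parser = reqparse.RequestParser()
#     parser.add_argument(
#         "fraction",
#         type=float_range(0.0, 1.0, argument="fraction"),
#         location="args",
#     )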
query_parser = reqparse.RequestParser()
query_parser.add_argument(
"return_pyarrow",
type=inputs.boolean,
default=True,
required=False,
location="args",
    help=(
        "whether to return query in pyarrow compatible binary format "
        "(faster); false returns json"
    ),
)
query_parser.add_argument(
"arrow_format",
type=inputs.boolean,
default=False,
required=False,
location="args",
help=("whether to convert dataframe to pyarrow ipc batch format"),
)
query_parser.add_argument(
"random_sample",
type=inputs.positive,
default=None,
required=False,
location="args",
help="How many samples to randomly get using tablesample on annotation tables, useful for visualization of large tables does not work as a random sample of query",
)
query_parser.add_argument(
"split_positions",
type=inputs.boolean,
default=False,
required=False,
location="args",
help=("whether to return position columns" "as seperate x,y,z columns (faster)"),
)
query_parser.add_argument(
"count",
type=inputs.boolean,
default=False,
required=False,
location="args",
help="whether to only return the count of a query",
)
query_parser.add_argument(
"allow_missing_lookups",
type=inputs.boolean,
default=False,
required=False,
location="args",
help="whether to return annotation results when there\
are new annotations that exist but haven't yet had supervoxel and \
rootId lookups. A warning will still be returned, but no 406 error thrown.",
)
query_parser.add_argument(
"allow_invalid_root_ids",
type=inputs.boolean,
default=False,
required=False,
location="args",
help="whether to let a query proceed when passed a set of root ids\
that are not valid at the timestamp that is queried. If True the filter will likely \
not be relevant and the user might not be getting data back that they expect, but it will not error.",
)
query_parser.add_argument(
"ipc_compress",
type=inputs.boolean,
default=True,
required=False,
location="args",
help="whether to have arrow compress the result when using \
return_pyarrow=True and arrow_format=True. \
If False, the result will not have it's internal data\
compressed (note that the entire response \
will be gzip compressed if accept-enconding includes gzip). \
If True, accept-encoding will determine what \
internal compression is used",
)
query_parser.add_argument(
"direct_sql_pandas",
type=inputs.boolean,
default=False,
required=False,
location="args",
help="whether to use direct SQL queries with pandas, \
if False it will fall back to the csv streaming method. \
which is prone to mangling types. \
CAVEclient>=8.0.0 should set this to True",
)
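# Illustrative request (the endpoint path and values are hypothetical) showing
# how the arguments above arrive on the query string:
#
#     GET /datastack/my_stack/version/100/table/my_table/query
#         ?return_pyarrow=true&split_positions=true&direct_sql_pandas=true
#
#     args = query_parser.parse_args()  # inside a request context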
query_seg_prop_parser = reqparse.RequestParser()
# add an argument for a string controlling the label format
query_seg_prop_parser.add_argument(
"label_format",
type=str,
default=None,
required=False,
location="args",
help="string controlling the label format, should be formatted like a python format string,\
i.e. {cell_type}_{id}, utilizing the columns available in the response",
)
# add an argument which is a list of column strings
query_seg_prop_parser.add_argument(
"label_columns",
type=str,
action="split",
default=None,
required=False,
location="args",
help="list of column names include in a label (will be overridden by label_format)",
)
metadata_parser = reqparse.RequestParser()
# add a boolean argument for whether to return all expired versions
metadata_parser.add_argument(
"expired",
type=inputs.boolean,
default=False,
required=False,
location="args",
help="whether to return all expired versions",
)
def fix_dataframe_types(df):
"""Fix dataframe types for nglui.
Args:
df (pd.DataFrame): dataframe to fix types for.
Returns:
pd.DataFrame: dataframe with fixed types."""
for colname in df.columns:
if df[colname].isnull().all():
df.drop(columns=[colname], inplace=True)
continue
        if pd.api.types.is_float_dtype(df[colname]):
            df[colname] = df[colname].astype(np.float32)
        if pd.api.types.is_integer_dtype(df[colname]):
            df[colname] = df[colname].astype(int)
        if pd.api.types.is_string_dtype(df[colname]):
            df[colname] = df[colname].astype(str)
return df
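# Illustrative sketch (hypothetical data, not executed in this module) of
# what fix_dataframe_types does to a mixed-type dataframe:
#
#     df = pd.DataFrame(
#         {
#             "x": [1.0, 2.0],        # float64 -> float32
#             "id": [1, 2],           # integer dtype -> int
#             "name": ["a", "b"],     # string dtype -> str
#             "empty": [None, None],  # all-null column is dropped
#         }
#     )
#     df = fix_dataframe_types(df)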
@cached(cache=TTLCache(maxsize=64, ttl=600))
def get_relevant_datastack_info(datastack_name):
ds_info = get_datastack_info(datastack_name=datastack_name)
seg_source = ds_info["segmentation_source"]
pcg_table_name = seg_source.split("/")[-1]
aligned_volume_name = ds_info["aligned_volume"]["name"]
return aligned_volume_name, pcg_table_name
def check_aligned_volume(aligned_volume):
aligned_volumes = get_aligned_volumes()
if aligned_volume not in aligned_volumes:
abort(400, f"aligned volume: {aligned_volume} not valid")
def get_closest_versions(datastack_name: str, timestamp: datetime.datetime):
avn, _ = get_relevant_datastack_info(datastack_name)
analysis_version_schema = AnalysisVersionSchema() # Instantiate the schema
with db_manager.session_scope(avn) as session:
# query analysis versions to get a valid version which is
# the closest to the timestamp while still being older
# than the timestamp
past_version = (
session.query(AnalysisVersion)
.filter(AnalysisVersion.datastack == datastack_name)
.filter(AnalysisVersion.valid == True)
.filter(AnalysisVersion.time_stamp < timestamp)
.order_by(AnalysisVersion.time_stamp.desc())
.first()
)
if past_version:
past_v_data = analysis_version_schema.dump(past_version)
else:
past_v_data = None
# query analysis versions to get a valid version which is
# the closest to the timestamp while still being newer
# than the timestamp
future_version = (
session.query(AnalysisVersion)
.filter(AnalysisVersion.datastack == datastack_name)
.filter(AnalysisVersion.valid == True)
.filter(AnalysisVersion.time_stamp > timestamp)
.order_by(AnalysisVersion.time_stamp.asc())
.first()
)
if future_version:
future_v_data = analysis_version_schema.dump(future_version)
else:
future_v_data = None
return past_v_data, future_v_data, avn
def check_column_for_root_id(col):
if isinstance(col, str):
if col.endswith("root_id"):
abort(400, "we are not presently supporting joins on root_ids")
elif isinstance(col, list):
for c in col:
if c.endwith("root_id"):
abort(400, "we are not presently supporting joins on root ids")
def check_joins(joins):
for join in joins:
check_column_for_root_id(join[1])
check_column_for_root_id(join[3])
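# Usage sketch (table and column names are hypothetical): joins are expected
# as 4-tuples of (table_a, column_a, table_b, column_b), which is why
# check_joins inspects the column entries at positions 1 and 3:
#
#     check_joins([("synapses", "pre_pt_supervoxel_id", "cells", "id")])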
def execute_materialized_query(
datastack: str,
aligned_volume: str,
mat_version: int,
pcg_table_name: str,
user_data: dict,
query_map: dict,
cg_client,
random_sample: int = None,
split_mode: bool = False,
direct_sql_pandas: bool = False,
) -> pd.DataFrame:
"""_summary_
Args:
datastack (str): datastack to query on
mat_version (int): verison to query on
user_data (dict): dictionary of query payload including filters
query_map (dict): mapping of model column names to dataframe column names
cg_client: chunkedgraph client to use for root id lookups
random_sample (int, optional): number of random samples to get using TABLESAMPLE. Defaults to None.
split_mode (bool, optional): whether to use split mode for the query. Defaults to False.
direct_sql_pandas (bool, optional): whether to use pandas for the query. Defaults to False.
Returns:
pd.DataFrame: a dataframe with the results of the query in the materialized version
dict[dict]: a dictionary of table names, with values that are a dictionary
that has keys of model column names, and values of their name in the dataframe with suffixes added
if necessary to disambiguate.
"""
mat_db_name = f"{datastack}__mat{mat_version}"
with db_manager.session_scope(mat_db_name) as session:
mat_row_count = (
session.query(MaterializedMetadata.row_count)
.filter(MaterializedMetadata.table_name == user_data["table"])
.scalar()
)
# Validate random_sample to prevent TABLESAMPLE errors
if random_sample is not None:
if not np.isfinite(random_sample) or random_sample <= 0:
print(f"WARNING: Invalid random_sample: {random_sample}, setting to None")
random_sample = None
elif mat_row_count <= 0:
print(f"WARNING: Invalid mat_row_count: {mat_row_count}, setting random_sample to None")
random_sample = None
elif random_sample >= mat_row_count:
random_sample = None
else:
percentage = (100.0 * random_sample) / mat_row_count
if not np.isfinite(percentage) or percentage <= 0:
print(f"WARNING: Invalid percentage calculation: {percentage}, setting random_sample to None")
random_sample = None
elif percentage > 100.0:
print(f"WARNING: Percentage > 100%: {percentage}, setting random_sample to None")
random_sample = None
else:
random_sample = percentage
if mat_row_count:
# Decide between TABLESAMPLE and hash-based sampling based on sample size
hash_config = user_data.get("hash_sampling_config")
use_hash_sampling = False
use_random_sample = random_sample
if hash_config and hash_config.get("enabled"):
# Use QUERY_LIMIT_SIZE from Flask config instead of parameter
max_points = current_app.config.get("PRECOMPUTED_OVERVIEW_MAX_SIZE", 50000)
# Get configurable threshold for switching from TABLESAMPLE to hash sampling
hash_sampling_threshold = current_app.config.get("HASH_SAMPLING_THRESHOLD_PERCENT", 5.0)
volume_fraction = hash_config.get("volume_fraction", 1.0)
# Validate volume_fraction to prevent invalid calculations
if not np.isfinite(volume_fraction) or volume_fraction <= 0:
print(f"WARNING: Invalid volume_fraction in hash_config: {volume_fraction}, using 1.0")
volume_fraction = 1.0
elif volume_fraction > 1.0:
print(f"WARNING: volume_fraction > 1.0 in hash_config: {volume_fraction}, capping at 1.0")
volume_fraction = 1.0
# Calculate what percentage of the table we need to sample
if mat_row_count > 0 and volume_fraction > 0:
sample_percentage = (max_points * 100.0) / (mat_row_count * volume_fraction)
else:
print(f"WARNING: Invalid values for percentage calculation: mat_row_count={mat_row_count}, volume_fraction={volume_fraction}")
sample_percentage = 100.0 # Fallback to no sampling
if sample_percentage >= 100.0: # Table is small enough - show all points
# No sampling needed, table has fewer rows than QUERY_LIMIT_SIZE
use_hash_sampling = False
use_random_sample = None
elif sample_percentage < hash_sampling_threshold: # Less than threshold - use TABLESAMPLE
# Calculate percentage needed (with some buffer to account for randomness)
use_random_sample = sample_percentage
# Validate that use_random_sample is valid for TABLESAMPLE
if not np.isfinite(use_random_sample) or use_random_sample <= 0:
print(f"WARNING: Invalid use_random_sample: {use_random_sample}, switching to hash sampling")
use_hash_sampling = True
use_random_sample = None
elif use_random_sample > 100.0:
print(f"WARNING: use_random_sample > 100%: {use_random_sample}, capping at 100%")
use_random_sample = 100.0
use_hash_sampling = False
else:
use_hash_sampling = False
else: # Threshold to 100% of table - use hash-based sampling
use_hash_sampling = True
use_random_sample = None # Don't use TABLESAMPLE when using hash sampling
# setup a query manager
qm = QueryManager(
mat_db_name,
segmentation_source=pcg_table_name,
meta_db_name=aligned_volume,
split_mode=split_mode,
random_sample=use_random_sample,
direct_sql_pandas=direct_sql_pandas
)
qm.configure_query(user_data)
qm.apply_filter({user_data["table"]: {"valid": True}}, qm.apply_equal_filter)
# Apply hash-based sampling if determined above
if use_hash_sampling:
qm.add_hash_spatial_sampling(
table_name=hash_config["table_name"],
spatial_column=hash_config["spatial_column"],
max_points=max_points, # Use the max_points from QUERY_LIMIT_SIZE
total_row_count=mat_row_count
)
# return the result
df, column_names = qm.execute_query(
desired_resolution=user_data["desired_resolution"]
)
df, warnings = update_rootids(df, user_data["timestamp"], query_map, cg_client)
if "limit" in user_data:
if len(df) >= user_data["limit"]:
                warnings.append(
                    f"result has {len(df)} entries, which is equal to or more "
                    f"than the limit of {user_data['limit']}; there may be more results which are not shown"
                )
if not direct_sql_pandas:
warnings.append(sql_query_warning)
return df, column_names, warnings
else:
return None, {}, []
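# Worked sketch of the TABLESAMPLE math above (numbers are illustrative):
# requesting random_sample=1000 rows from a table whose MaterializedMetadata
# row_count is 1_000_000 yields a sampling percentage of
# (100.0 * 1000) / 1_000_000 == 0.1, which the QueryManager can hand to
# postgres TABLESAMPLE; asking for at least as many rows as the table holds
# disables sampling entirely (random_sample is set back to None).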
def execute_production_query(
aligned_volume_name: str,
segmentation_source: str,
user_data: dict,
chosen_timestamp: datetime.datetime,
cg_client,
allow_missing_lookups: bool = False,
direct_sql_pandas: bool = False,
) -> pd.DataFrame:
"""_summary_
Args:
datastack (str): _description_
user_data (dict): _description_
timestamp_start (datetime.datetime): _description_
timestamp_end (datetime.datetime): _description_
Returns:
pd.DataFrame: dataframe of query
dict: _map of table name to column name mappings
dict: list of warnings
"""
user_timestamp = user_data["timestamp"]
if chosen_timestamp < user_timestamp:
query_forward = True
start_time = chosen_timestamp
end_time = user_timestamp
elif chosen_timestamp > user_timestamp:
query_forward = False
start_time = user_timestamp
end_time = chosen_timestamp
else:
abort(400, "do not use live live query to query a materialized timestamp")
# setup a query manager on production database with split tables
qm = QueryManager(
aligned_volume_name,
segmentation_source,
split_mode=True,
split_mode_outer=True,
direct_sql_pandas=direct_sql_pandas
)
user_data_modified = strip_root_id_filters(user_data)
qm.configure_query(user_data_modified)
qm.select_column(user_data["table"], "created")
qm.select_column(user_data["table"], "deleted")
qm.select_column(user_data["table"], "superceded_id")
qm.apply_table_crud_filter(user_data["table"], start_time, end_time)
df, column_names = qm.execute_query(
desired_resolution=user_data["desired_resolution"]
)
df, warnings = update_rootids(
df, user_timestamp, {}, cg_client, allow_missing_lookups
)
if "limit" in user_data:
if len(df) >= user_data["limit"]:
            warnings.append(
                f"result has {len(df)} entries, which is equal to or more "
                f"than the limit of {user_data['limit']}; there may be more results which are not shown"
            )
return df, column_names, warnings
def apply_filters(df, user_data, column_names):
filter_in_dict = user_data.get("filter_in_dict", None)
filter_out_dict = user_data.get("filter_out_dict", None)
filter_equal_dict = user_data.get("filter_equal_dict", None)
filter_greater_dict = user_data.get("filter_greater_dict", None)
filter_less_dict = user_data.get("filter_less_dict", None)
filter_greater_equal_dict = user_data.get("filter_greater_equal_dict", None)
filter_less_equal_dict = user_data.get("filter_less_equal_dict", None)
if filter_in_dict:
for table, filter in filter_in_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[df[colname].isin(val)]
if filter_out_dict:
        for table, filter in filter_out_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[~df[colname].isin(val)]
if filter_equal_dict:
for table, filter in filter_equal_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[df[colname] == val]
if filter_greater_dict:
for table, filter in filter_greater_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[df[colname] > val]
if filter_less_dict:
for table, filter in filter_less_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[df[colname] < val]
if filter_greater_equal_dict:
for table, filter in filter_greater_equal_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[df[colname] >= val]
if filter_less_equal_dict:
for table, filter in filter_less_equal_dict.items():
for col, val in filter.items():
colname = column_names[table][col]
df = df[df[colname] <= val]
return df
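# Shape of the filter payloads consumed above (table, column, and values are
# hypothetical); each outer key is a table name, each inner key a column:
#
#     user_data = {
#         "filter_in_dict": {"my_table": {"cell_type": ["pyramidal", "basket"]}},
#         "filter_equal_dict": {"my_table": {"valid": True}},
#         "filter_greater_dict": {"my_table": {"size": 100}},
#     }
#     df = apply_filters(df, user_data, column_names)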
def combine_queries(
mat_df: pd.DataFrame,
prod_df: pd.DataFrame,
chosen_version: AnalysisVersion,
user_data: dict,
column_names: dict,
column_names_prod: dict,
) -> pd.DataFrame:
"""combine a materialized query with an production query
will remove deleted rows from materialized query,
strip deleted entries from prod_df remove any CRUD columns
and then append the two dataframes together to be a coherent
result.
Args:
mat_df (pd.DataFrame): _description_
prod_df (pd.DataFrame): _description_
user_data (dict): _description_
Returns:
pd.DataFrame: _description_
"""
crud_columns, created_columns = collect_crud_columns(column_names=column_names_prod)
if mat_df is not None:
if len(mat_df) == 0:
if prod_df is None:
return mat_df.drop(columns=crud_columns, axis=1, errors="ignore")
else:
mat_df = None
user_timestamp = user_data["timestamp"]
chosen_timestamp = pytz.utc.localize(chosen_version.time_stamp)
table = user_data["table"]
if mat_df is not None:
mat_df = mat_df.set_index(column_names[table]["id"])
if prod_df is not None:
prod_df = prod_df.set_index(column_names[table]["id"])
if (prod_df is None) and (mat_df is None):
abort(400, f"This query on table {user_data['table']} returned no results")
# if there is nothing to combine, just return the prod table to reflect
# schema with no rows
    if (mat_df is None) and len(prod_df) == 0:
        cut_prod_df = prod_df.drop(crud_columns, axis=1, errors="ignore")
        if len(created_columns) > 0:
            cut_prod_df = cut_prod_df.drop(created_columns, axis=1, errors="ignore")
return cut_prod_df.reset_index()
if prod_df is not None:
# if we are moving forward in time
if chosen_timestamp < user_timestamp:
deleted_between = (
prod_df[column_names_prod[table]["deleted"]] > chosen_timestamp
) & (prod_df[column_names_prod[table]["deleted"]] < user_timestamp)
created_between = (
prod_df[column_names_prod[table]["created"]] > chosen_timestamp
) & (prod_df[column_names_prod[table]["created"]] < user_timestamp)
to_delete_in_mat = deleted_between & ~created_between
to_add_in_mat = created_between & ~deleted_between
if len(prod_df[deleted_between].index) > 0:
cut_prod_df = prod_df.drop(prod_df[deleted_between].index, axis=0)
else:
cut_prod_df = prod_df
else:
deleted_between = (
prod_df[column_names_prod[table]["deleted"]] > user_timestamp
) & (prod_df[column_names_prod[table]["deleted"]] < chosen_timestamp)
created_between = (
prod_df[column_names_prod[table]["created"]] > user_timestamp
) & (prod_df[column_names_prod[table]["created"]] < chosen_timestamp)
to_delete_in_mat = created_between & ~deleted_between
to_add_in_mat = deleted_between & ~created_between
if len(prod_df[created_between].index) > 0:
cut_prod_df = prod_df.drop(prod_df[created_between].index, axis=0)
else:
cut_prod_df = prod_df
        # drop the CRUD bookkeeping columns from the production dataframe
cut_prod_df = cut_prod_df.drop(crud_columns, axis=1)
if mat_df is not None:
created_columns = [c for c in created_columns if c not in mat_df]
if len(created_columns) > 0:
cut_prod_df = cut_prod_df.drop(created_columns, axis=1)
if len(prod_df[to_delete_in_mat].index) > 0:
mat_df = mat_df.drop(
prod_df[to_delete_in_mat].index, axis=0, errors="ignore"
)
comb_df = pd.concat([cut_prod_df, mat_df])
else:
comb_df = prod_df[to_add_in_mat].drop(
columns=crud_columns, axis=1, errors="ignore"
)
else:
comb_df = mat_df.drop(columns=crud_columns, axis=1, errors="ignore")
return comb_df.reset_index()
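# Sketch of the time-window logic above (timestamps are illustrative):
# querying forward from a materialization at t0 to a user timestamp t1 > t0,
# an annotation created between t0 and t1 (and not also deleted in that
# window) is added from the production rows, while one deleted between t0
# and t1 (and not created in that window) is dropped from the materialized
# rows; querying backward (t1 < t0) swaps the two roles.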
@client_bp.expect(metadata_parser)
@client_bp.route("/datastack/<string:datastack_name>/versions")
class DatastackVersions(Resource):
method_decorators = [
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,
]
@client_bp.doc("datastack_versions", security="apikey")
def get(self, datastack_name: str):
"""get available versions
Args:
datastack_name (str): datastack name
Returns:
list(int): list of versions that are available
"""
aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
datastack_name
)
with db_manager.session_scope(aligned_volume_name) as session:
query = session.query(AnalysisVersion.version).filter(
AnalysisVersion.datastack == datastack_name
)
args = metadata_parser.parse_args()
if not args.get("expired"):
query = query.filter(AnalysisVersion.valid == True)
version_tuples = query.all()
versions = [v[0] for v in version_tuples]
return versions, 200
@client_bp.route(
"/datastack/<string:datastack_name>/version/<int(signed=True):version>"
)
class DatastackVersion(Resource):
method_decorators = [
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,
]
@client_bp.doc("version metadata", security="apikey")
def get(self, datastack_name: str, version: int):
"""get version metadata
Args:
datastack_name (str): datastack name
version (int): version number
Returns:
dict: metadata dictionary for this version
"""
aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
datastack_name
)
with db_manager.session_scope(aligned_volume_name) as session:
analysis_version_obj = (
session.query(AnalysisVersion)
.filter(AnalysisVersion.datastack == datastack_name)
.filter(AnalysisVersion.version == version)
.first()
)
if analysis_version_obj is None:
return "No version found", 404
schema = AnalysisVersionSchema()
result = schema.dump(analysis_version_obj)
return result, 200
@client_bp.route(
"/datastack/<string:datastack_name>/version/<int(signed=True):version>/table/<string:table_name>/count"
)
class FrozenTableCount(Resource):
method_decorators = [
validate_datastack,
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,
]
@client_bp.doc("table count", security="apikey")
def get(
self,
datastack_name: str,
version: int,
table_name: str,
target_datastack: str = None,
target_version: int = None,
):
"""get annotation count in table
Args:
datastack_name (str): datastack name of table
version (int): version of table
table_name (str): table name
Returns:
int: number of rows in this table
"""
validate_table_args([table_name], target_datastack, target_version)
db_name = f"{datastack_name}__mat{version}"
        # if the database is a split database get a split model
        # and if it's not get a flat model
with db_manager.session_scope(db_name) as session:
mat_row_count = (
session.query(MaterializedMetadata.row_count)
.filter(MaterializedMetadata.table_name == table_name)
.scalar()
)
return mat_row_count, 200
class CustomResource(Resource):
@staticmethod
def apply_decorators(*decorators):
def wrapper(func):
for decorator in reversed(decorators):
func = decorator(func)
return func
return wrapper
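# Usage sketch (the resource and decorators shown are hypothetical):
#
#     class MyResource(CustomResource):
#         @CustomResource.apply_decorators(reset_auth, limit_by_category("fast_query"))
#         def get(self):
#             ...
#
# reversed() applies the last-listed decorator first, so the first decorator
# listed ends up outermost, matching the usual stacked-@decorator order.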
@client_bp.expect(metadata_parser)
@client_bp.route("/datastack/<string:datastack_name>/metadata", strict_slashes=False)
class DatastackMetadata(Resource):
method_decorators = [
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,
]
@client_bp.doc("all valid version metadata", security="apikey")
def get(self, datastack_name: str):
"""get materialized metadata for all valid versions
Args:
datastack_name (str): datastack name
Returns:
list: list of metadata dictionaries
"""
aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
datastack_name
)
with db_manager.session_scope(aligned_volume_name) as session:
query = session.query(AnalysisVersion).filter(
AnalysisVersion.datastack == datastack_name
)
args = metadata_parser.parse_args()
if not args.get("expired"):
query = query.filter(AnalysisVersion.valid == True)
analysis_versions = query.all()
if not analysis_versions:
return "No valid versions found", 404
schema = AnalysisVersionSchema()
result = schema.dump(analysis_versions, many=True)
return result, 200
@client_bp.route(
"/datastack/<string:datastack_name>/version/<int(signed=True):version>/tables"
)
class FrozenTableVersions(Resource):
method_decorators = [
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,
]
@client_bp.doc("get_frozen_tables", security="apikey")
def get(self, datastack_name: str, version: int):
"""get frozen tables
Args:
datastack_name (str): datastack name
version (int): version number
Returns:
list(str): list of frozen tables in this version
"""
aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
datastack_name
)
with db_manager.session_scope(aligned_volume_name) as session:
av = (
session.query(AnalysisVersion)
.filter(AnalysisVersion.version == version)
.filter(AnalysisVersion.datastack == datastack_name)
.first()
)
if av is None:
return None, 404
response = (
session.query(AnalysisTable.table_name)
.filter(AnalysisTable.analysisversion_id == av.id)
.filter(AnalysisTable.valid == True)
.all()
)
table_names = [r[0] for r in response]
if not table_names:
return None, 404
return table_names, 200
@client_bp.route(
"/datastack/<string:datastack_name>/version/<int(signed=True):version>/tables/metadata"
)
class FrozenTablesMetadata(Resource):
method_decorators = [
validate_datastack,
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,
]
@client_bp.doc("get_frozen_tables_metadata", security="apikey")
def get(
self,
datastack_name: str,
version: int,
target_datastack: str = None,
target_version: int = None,
):
"""get frozen tables metadata
Args:
datastack_name (str): datastack name
version (int): version number
Returns:
dict: dictionary of table metadata
"""
aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
target_datastack
)
analysis_version, analysis_tables = get_analysis_version_and_tables(
target_datastack, target_version, aligned_volume_name
)
if not analysis_tables:
return [], 404
with request_db_session(aligned_volume_name) as db:
for table in analysis_tables:
table_name = table["table_name"]
ann_md = db.database.get_table_metadata(table_name)
                # get_table_metadata joins on the segmentation metadata, which
                # carries the segmentation table in its table_name field and the
                # annotation table name in its annotation_table field. Updating
                # with it directly would overwrite table_name with the
                # segmentation table name, which was not the intent of the API,
                # so we restore the annotation table name below.
ann_table = ann_md.pop("annotation_table", None)
if ann_table:
ann_md["table_name"] = ann_table
ann_md.pop("id")
ann_md.pop("deleted")
table.update(ann_md)
return analysis_tables, 200
@client_bp.route(
"/datastack/<string:datastack_name>/version/<int(signed=True):version>/table/<string:table_name>/metadata"
)
class FrozenTableMetadata(Resource):
method_decorators = [
validate_datastack,
limit_by_category("fast_query"),
auth_requires_permission("view", table_arg="datastack_name"),
reset_auth,