@@ -136,9 +136,9 @@ def __db_connection(self):
136
136
137
137
return self .__db_conn
138
138
139
- def __execute (self , statement , * args ):
139
+ def __execute_dml (self , statement , * args ):
140
140
logger .debug (statement , args )
141
- c = self .__connection ()
141
+ c = self .__db_connection ()
142
142
# If timeout is set, then apply it to the connection. PyODBC will then assign that value to the Cursor created during execute()
143
143
if self .timeout :
144
144
c .timeout = self .timeout
@@ -149,18 +149,21 @@ def __execute(self, statement, *args):
149
149
pass
150
150
return cur
151
151
152
def __execute_ddl(self, statement, *args):
    """Execute a DDL statement on the database-scoped connection.

    Unlike the DML path, the cursor is returned without draining extra
    result sets: DDL is not expected to emit additional row counts or
    PRINT messages. Presumably __db_connection() is scoped to the target
    database (vs. the server-level connection used by __execute_server)
    — TODO confirm against the connection setup.

    :param statement: SQL text, using pyodbc ``?`` placeholders.
    :param args: positional parameters bound to the placeholders.
    :return: the pyodbc cursor produced by ``execute()``.
    """
    # Log with lazy %-style arguments. The previous form,
    # logger.debug(statement, args), used the SQL text itself as the format
    # string, so a literal '%' in the statement — or non-empty args with no
    # '%' placeholders (pyodbc uses '?') — broke log-record formatting and
    # the debug line was silently lost.
    logger.debug("%s %s", statement, args)
    c = self.__db_connection()
    # If timeout is set, then apply it to the connection. PyODBC will then
    # assign that value to the Cursor created during execute()
    if self.timeout:
        c.timeout = self.timeout
    return c.execute(statement, *args)
159
+
160
def __execute_server(self, statement, *args):
    """Execute a statement on the server-level connection.

    Used for operations that address the server rather than a single
    database (RESTORE, BACKUP, DROP DATABASE, sys.master_files queries).
    NOTE(review): assumes __connection() is a server-scoped (e.g. master)
    connection — confirm against the connection setup elsewhere in the file.

    :param statement: SQL text, using pyodbc ``?`` placeholders.
    :param args: positional parameters bound to the placeholders.
    :return: the pyodbc cursor produced by ``execute()``.
    """
    # Lazy %-style logging: passing the SQL text as the format string (the
    # previous logger.debug(statement, args) form) fails to format whenever
    # the statement contains a literal '%' or args is non-empty, losing the
    # debug output.
    logger.debug("%s %s", statement, args)
    c = self.__connection()
    # If timeout is set, then apply it to the connection. PyODBC will then
    # assign that value to the Cursor created during execute()
    if self.timeout:
        c.timeout = self.timeout
    return c.execute(statement, *args)
164
167
165
168
def __get_path (self , filepath ):
166
169
if "\\ " in filepath :
@@ -179,7 +182,7 @@ def __get_default_datafolder(self):
179
182
checking the model db seems like a good 'boring' solution
180
183
:return: Default data directory e.g. "C:\\ DATA"
181
184
"""
182
- datafile = self .__execute (
185
+ datafile = self .__execute_server (
183
186
"""
184
187
SELECT physical_name
185
188
FROM sys.master_files mf
@@ -197,7 +200,7 @@ def __get_default_logfolder(self):
197
200
__get_default_datafolder: see for more info
198
201
:return:
199
202
"""
200
- logfile = self .__execute (
203
+ logfile = self .__execute_server (
201
204
"""
202
205
SELECT physical_name
203
206
FROM sys.master_files mf
@@ -217,7 +220,7 @@ def __get_file_moves(self, input_path):
217
220
datadir = self .__get_default_datafolder ()
218
221
logdir = self .__get_default_logfolder ()
219
222
220
- filelist = self .__execute (
223
+ filelist = self .__execute_server (
221
224
f"RESTORE FILELISTONLY FROM DISK = ?;" , input_path
222
225
).fetchall ()
223
226
@@ -255,7 +258,7 @@ def __run_scripts(self, script_list, title=""):
255
258
256
259
for i , script in enumerate (script_list ):
257
260
logger .info (f'Running { title } script #{ i } "{ script [:50 ]} "' )
258
- cursor = self .__db_execute (script )
261
+ cursor = self .__execute_dml (script )
259
262
results = None
260
263
try :
261
264
results = cursor .fetchall ()
@@ -272,10 +275,10 @@ def __create_seed_table(self, qualifier_map):
272
275
SEED_TABLE_NAME , "," .join (seed_column_lines )
273
276
)
274
277
275
- self .__db_execute (create_statement )
278
+ self .__execute_ddl (create_statement )
276
279
277
280
def __drop_seed_table(self):
    """Remove the temporary seed table.

    Uses DROP TABLE IF EXISTS, so this is a safe no-op when the table
    was never created.
    """
    drop_statement = "DROP TABLE IF EXISTS [{}];".format(SEED_TABLE_NAME)
    self.__execute_ddl(drop_statement)
279
282
280
283
def __insert_seed_row (self , qualifier_map ):
281
284
column_list = "," .join (
@@ -289,7 +292,7 @@ def __insert_seed_row(self, qualifier_map):
289
292
statement = "INSERT INTO [{}]({}) VALUES ({});" .format (
290
293
SEED_TABLE_NAME , column_list , substitution_list
291
294
)
292
- self .__db_execute (statement , value_list )
295
+ self .__execute_dml (statement , value_list )
293
296
294
297
def __seed (self , qualifier_map ):
295
298
for i in self .progress (
@@ -322,10 +325,10 @@ def create_database(self):
322
325
323
326
def drop_database(self):
    """Drop the managed database.

    First kicks every other session off the database (SINGLE_USER WITH
    ROLLBACK IMMEDIATE) and closes open work, because timing can otherwise
    make a plain DROP fail; then drops it if it still exists.
    """
    # NOTE(review): db_name is interpolated into DDL text — database names
    # cannot be bound as parameters in T-SQL, so db_name must come from
    # trusted configuration.
    force_single_user = (
        f"ALTER DATABASE [{self.db_name}] SET SINGLE_USER WITH ROLLBACK IMMEDIATE;"
    )
    drop_if_exists = f"DROP DATABASE IF EXISTS [{self.db_name}];"
    self.__execute_server(force_single_user)
    self.__execute_server(drop_if_exists)
329
332
330
333
def anonymize_database (self , database_strategy , db_workers ):
331
334
qualifier_map = database_strategy .fake_update_qualifier_map
@@ -353,13 +356,13 @@ def anonymize_table(progressbar, table_strategy: TableStrategy):
353
356
354
357
if table_strategy .strategy_type == TableStrategyTypes .TRUNCATE :
355
358
progressbar .set_description ("Truncating {}" .format (table_name ))
356
- self .__db_execute (
359
+ self .__execute_dml (
357
360
"TRUNCATE TABLE {}[{}];" .format (schema_prefix , table_name )
358
361
)
359
362
360
363
elif table_strategy .strategy_type == TableStrategyTypes .DELETE :
361
364
progressbar .set_description ("Deleting {}" .format (table_name ))
362
- self .__db_execute (
365
+ self .__execute_dml (
363
366
"DELETE FROM {}[{}];" .format (schema_prefix , table_name )
364
367
)
365
368
@@ -396,7 +399,7 @@ def anonymize_table(progressbar, table_strategy: TableStrategy):
396
399
397
400
# set ansi warnings off because otherwise we run into lots of little incompatibilities between the seed data nd the columns
398
401
# e.g. string or binary data would be truncated (when the data is too long)
399
- self .__db_execute (
402
+ self .__execute_dml (
400
403
f"{ ansi_warnings_prefix } UPDATE { schema_prefix } [{ table_name } ] SET { column_assignments } { where_clause } ; { ansi_warnings_suffix } "
401
404
)
402
405
@@ -436,7 +439,7 @@ def restore_database(self, input_path):
436
439
move_clauses = ", " .join (["MOVE ? TO ?" ] * len (move_files ))
437
440
move_clause_params = [item for pair in move_files .items () for item in pair ]
438
441
439
- restore_cursor = self .__execute (
442
+ restore_cursor = self .__execute_server (
440
443
f"RESTORE DATABASE ? FROM DISK = ? WITH { move_clauses } , STATS = ?;" ,
441
444
[self .db_name , input_path , * move_clause_params , self .__STATS ],
442
445
)
@@ -452,7 +455,7 @@ def dump_database(self, output_path):
452
455
"," .join (with_options ) + ", " if len (with_options ) > 0 else ""
453
456
)
454
457
455
- dump_cursor = self .__execute (
458
+ dump_cursor = self .__execute_server (
456
459
f"BACKUP DATABASE ? TO DISK = ? WITH { with_options_str } STATS = ?;" ,
457
460
[self .db_name , output_path , self .__STATS ],
458
461
)
0 commit comments