
Commit ac9fef2

Azrenbeth authored and committed
All printing replaced by logging
This allows other packages that use the compressor to control what and how the library prints.
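Because the library now emits everything through the `log` facade instead of printing directly, the crate that embeds the compressor decides which logger runs and which messages get through. A minimal sketch of a hypothetical consumer (assuming it happens to pick env_logger, which this commit does not require):

// Hypothetical downstream crate that embeds the compressor.
// Any logger that implements the `log` facade would work equally well.
use log::LevelFilter;

fn main() {
    env_logger::Builder::new()
        // Default: only warnings and errors from other crates.
        .filter_level(LevelFilter::Warn)
        // But keep the compressor's progress messages at info level.
        .filter_module("synapse_compress_state", LevelFilter::Info)
        .init();

    // ... build a Config and call synapse_compress_state::run(config) as usual ...
}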
1 parent 4b5ec6b commit ac9fef2

File tree

8 files changed: +62 -29 lines

Cargo.lock

+2 (generated file; diff not rendered by default)

Cargo.toml

+2
@@ -18,6 +18,8 @@ postgres-openssl = "0.5.0"
 rand = "0.8.0"
 rayon = "1.3.0"
 string_cache = "0.8.0"
+env_logger = "0.9.0"
+log = "0.4.14"
 
 [dependencies.state-map]
 git = "https://github.com/matrix-org/rust-matrix-state-map"

compressor_integration_tests/tests/auto_compressor_state_saving_tests.rs

+2 -1
@@ -2,13 +2,14 @@ use auto_compressor::state_saving::{
     connect_to_database, create_tables_if_needed, read_room_compressor_state,
     write_room_compressor_state,
 };
-use compressor_integration_tests::{clear_compressor_state, DB_URL};
+use compressor_integration_tests::{clear_compressor_state, setup_logger, DB_URL};
 use serial_test::serial;
 use synapse_compress_state::Level;
 
 #[test]
 #[serial(db)]
 fn write_then_read_state_gives_correct_results() {
+    setup_logger();
     let mut client = connect_to_database(DB_URL).unwrap();
     create_tables_if_needed(&mut client).unwrap();
     clear_compressor_state();
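The tests now call a `setup_logger()` helper exported by `compressor_integration_tests`; its implementation is not shown in this commit. A plausible sketch, assuming it simply initialises env_logger in a way that is safe to call from every test:

// Hypothetical sketch of the helper the tests import; the real one lives in
// the compressor_integration_tests crate and is not part of this diff.
pub fn setup_logger() {
    // try_init() errors if a logger is already installed, which is expected
    // when several #[test] functions in the same binary call this helper,
    // so the result is deliberately ignored.
    let _ = env_logger::builder().is_test(true).try_init();
}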

compressor_integration_tests/tests/compressor_config_tests.rs

+9 -1
@@ -7,7 +7,7 @@ use compressor_integration_tests::{
         compressed_3_3_from_0_to_13_with_state, line_segments_with_state, line_with_state,
         structure_from_edges_with_state,
     },
-    DB_URL,
+    setup_logger, DB_URL,
 };
 use serial_test::serial;
 use synapse_compress_state::{run, Config};
@@ -22,6 +22,7 @@ use synapse_compress_state::{run, Config};
 #[test]
 #[serial(db)]
 fn run_succeeds_without_crashing() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2-3-4-5-6-7-8-9-10-11-12-13
@@ -67,6 +68,7 @@ fn run_succeeds_without_crashing() {
 #[test]
 #[serial(db)]
 fn changes_commited_if_no_min_saved_rows() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -132,6 +134,7 @@ fn changes_commited_if_no_min_saved_rows() {
 #[test]
 #[serial(db)]
 fn changes_commited_if_min_saved_rows_exceeded() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -197,6 +200,7 @@ fn changes_commited_if_min_saved_rows_exceeded() {
 #[test]
 #[serial(db)]
 fn changes_not_commited_if_fewer_than_min_saved_rows() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -263,6 +267,7 @@ fn changes_not_commited_if_fewer_than_min_saved_rows() {
 #[test]
 #[should_panic(expected = "Error connecting to the database:")]
 fn run_panics_if_invalid_db_url() {
+    setup_logger();
     // set up the config options
     let db_url = "thisIsAnInvalidURL".to_string();
     let room_id = "room1".to_string();
@@ -298,6 +303,7 @@ fn run_panics_if_invalid_db_url() {
 #[test]
 #[serial(db)]
 fn run_only_affects_given_room_id() {
+    setup_logger();
     // build room1 stuff up
     // This starts with the following structure
     //
@@ -374,6 +380,7 @@ fn run_only_affects_given_room_id() {
 #[test]
 #[serial(db)]
 fn run_respects_groups_to_compress() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -456,6 +463,7 @@ fn run_respects_groups_to_compress() {
 #[test]
 #[serial(db)]
 fn run_is_idempotent_when_run_on_whole_room() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13

compressor_integration_tests/tests/compressor_continue_run_tests.rs

+2 -1
@@ -2,7 +2,7 @@ use compressor_integration_tests::{
     add_contents_to_database, database_collapsed_states_match_map, database_structure_matches_map,
     empty_database,
     map_builder::{compressed_3_3_from_0_to_13_with_state, line_segments_with_state},
-    DB_URL,
+    setup_logger, DB_URL,
 };
 use serial_test::serial;
 use synapse_compress_state::{continue_run, Level};
@@ -13,6 +13,7 @@ use synapse_compress_state::{continue_run, Level};
 #[test]
 #[serial(db)]
 fn continue_run_called_twice_same_as_run() {
+    setup_logger();
     // This starts with the following structure
     //
     // 0-1-2 3-4-5 6-7-8 9-10-11 12-13

src/database.rs

+5 -6
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 use indicatif::{ProgressBar, ProgressStyle};
+use log::{debug, trace};
 use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
 use postgres::{fallible_iterator::FallibleIterator, types::ToSql, Client};
 use postgres_openssl::MakeTlsConnector;
@@ -40,7 +41,6 @@ use super::StateGroupEntry;
 /// * `max_state_group` - If specified, then only fetch the entries for state
 ///     groups lower than or equal to this number.
 /// * 'groups_to_compress' - The number of groups to get from the database before stopping
-
 pub fn get_data_from_db(
     db_url: &str,
     room_id: &str,
@@ -59,7 +59,6 @@ pub fn get_data_from_db(
     // Search for the group id of the groups_to_compress'th group after min_state_group
     // If this is saved, then the compressor can continue by having min_state_group being
     // set to this maximum. If no such group can be found then return None.
-
     let max_group_found = find_max_group(
         &mut client,
         room_id,
@@ -221,7 +220,7 @@ fn load_map_from_db(
        max_group_found,
    ));
 
-    println!("Got initial state from database. Checking for any missing state groups...");
+    debug!("Got initial state from database. Checking for any missing state groups...");
 
    // Due to reasons some of the state groups appear in the edges table, but
    // not in the state_groups_state table.
@@ -250,14 +249,14 @@ fn load_map_from_db(
            .collect();
 
        if missing_sgs.is_empty() {
-            // println!("No missing state groups");
+            trace!("No missing state groups");
            break;
        }
 
        missing_sgs.sort_unstable();
        missing_sgs.dedup();
 
-        // println!("Missing {} state groups", missing_sgs.len());
+        trace!("Missing {} state groups", missing_sgs.len());
 
        // find state groups not picked up already and add them to the map
        let map = get_missing_from_db(client, &missing_sgs, min_state_group, max_group_found);
@@ -531,7 +530,7 @@ pub fn send_changes_to_db(
 
    let mut client = Client::connect(db_url, connector).unwrap();
 
-    println!("Writing changes...");
+    debug!("Writing changes...");
 
    // setup the progress bar
    let pb: ProgressBar;

src/lib.rs

+21 -20
@@ -20,6 +20,7 @@
 // of arguments - this hopefully doesn't make the code unclear
 // #[allow(clippy::too_many_arguments)] is therefore used around some functions
 
+use log::{info, warn};
 use pyo3::{exceptions, prelude::*};
 
 use clap::{crate_authors, crate_description, crate_name, crate_version, value_t, App, Arg};
@@ -290,7 +291,7 @@ impl Config {
 
 pub fn run(mut config: Config) {
    // First we need to get the current state groups
-    println!("Fetching state from DB for room '{}'...", config.room_id);
+    info!("Fetching state from DB for room '{}'...", config.room_id);
 
    let (state_group_map, max_group_found) = database::get_data_from_db(
        &config.db_url,
@@ -301,19 +302,19 @@ pub fn run(mut config: Config) {
    )
    .unwrap_or_else(|| panic!("No state groups found within this range"));
 
-    println!("Fetched state groups up to {}", max_group_found);
+    info!("Fetched state groups up to {}", max_group_found);
 
-    println!("Number of state groups: {}", state_group_map.len());
+    info!("Number of state groups: {}", state_group_map.len());
 
    let original_summed_size = state_group_map
        .iter()
        .fold(0, |acc, (_, v)| acc + v.state_map.len());
 
-    println!("Number of rows in current table: {}", original_summed_size);
+    info!("Number of rows in current table: {}", original_summed_size);
 
    // Now we actually call the compression algorithm.
 
-    println!("Compressing state...");
+    info!("Compressing state...");
 
    let compressor = Compressor::compress(&state_group_map, &config.level_sizes.0);
 
@@ -327,22 +328,22 @@ pub fn run(mut config: Config) {
 
    let ratio = (compressed_summed_size as f64) / (original_summed_size as f64);
 
-    println!(
+    info!(
        "Number of rows after compression: {} ({:.2}%)",
        compressed_summed_size,
        ratio * 100.
    );
 
-    println!("Compression Statistics:");
-    println!(
+    info!("Compression Statistics:");
+    info!(
        "  Number of forced resets due to lacking prev: {}",
        compressor.stats.resets_no_suitable_prev
    );
-    println!(
+    info!(
        "  Number of compressed rows caused by the above: {}",
        compressor.stats.resets_no_suitable_prev_size
    );
-    println!(
+    info!(
        "  Number of state groups changed: {}",
        compressor.stats.state_groups_changed
    );
@@ -352,14 +353,14 @@ pub fn run(mut config: Config) {
    }
 
    if ratio > 1.0 {
-        println!("This compression would not remove any rows. Exiting.");
+        warn!("This compression would not remove any rows. Exiting.");
        return;
    }
 
    if let Some(min) = config.min_saved_rows {
        let saving = (original_summed_size - compressed_summed_size) as i32;
        if saving < min {
-            println!(
+            warn!(
                "Only {} rows would be saved by this compression. Skipping output.",
                saving
            );
@@ -485,7 +486,7 @@ fn output_sql(
        return;
    }
 
-    println!("Writing changes...");
+    info!("Writing changes...");
 
    let pb: ProgressBar;
    if cfg!(feature = "no-progress-bars") {
@@ -559,7 +560,7 @@ pub fn continue_run(
    let ratio = (new_num_rows as f64) / (original_num_rows as f64);
 
    if ratio > 1.0 {
-        println!("This compression would not remove any rows. Aborting.");
+        warn!("This compression would not remove any rows. Aborting.");
        return Some(ChunkStats {
            new_level_info: compressor.get_level_info(),
            last_compressed_group: max_group_found,
@@ -600,7 +601,7 @@ fn check_that_maps_match(
    old_map: &BTreeMap<i64, StateGroupEntry>,
    new_map: &BTreeMap<i64, StateGroupEntry>,
 ) {
-    println!("Checking that state maps match...");
+    info!("Checking that state maps match...");
 
    let pb: ProgressBar;
    if cfg!(feature = "no-progress-bars") {
@@ -625,10 +626,10 @@ fn check_that_maps_match(
        pb.inc(1);
 
        if expected != actual {
-            println!("State Group: {}", sg);
-            println!("Expected: {:#?}", expected);
-            println!("actual: {:#?}", actual);
-            Err(format!("States for group {} do not match", sg))
+            Err(format!(
+                "States for group {} do not match. Expected {:#?}, found {:#?}",
+                sg, expected, actual
+            ))
        } else {
            Ok(())
        }
@@ -637,7 +638,7 @@ fn check_that_maps_match(
 
    pb.finish();
 
-    println!("New state map matches old one");
+    info!("New state map matches old one");
 }
 
 /// Gets the full state for a given group from the map (of deltas)

src/main.rs

+19
@@ -20,8 +20,27 @@
 #[global_allocator]
 static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
 
+use log::LevelFilter;
+use std::env;
+use std::io::Write;
+
 use synapse_compress_state as comp_state;
 
 fn main() {
+    // setup the logger
+    // The default can be overwritten with COMPRESSOR_LOG_LEVEL
+    // see the README for more information <---- TODO
+    if env::var("COMPRESSOR_LOG_LEVEL").is_err() {
+        let mut log_builder = env_logger::builder();
+        // Only output the log message (and not the prefixed timestamp etc.)
+        log_builder.format(|buf, record| writeln!(buf, "{}", record.args()));
+        // By default print all of the debugging messages from this library
+        log_builder.filter_module("synapse_compress_state", LevelFilter::Debug);
+        log_builder.init();
+    } else {
+        // If COMPRESSOR_LOG_LEVEL was set then use that
+        env_logger::Builder::from_env("COMPRESSOR_LOG_LEVEL").init();
+    }
+
    comp_state::run(comp_state::Config::parse_arguments());
 }
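With this setup the binary keeps printing the library's debug-level messages by default, while setting COMPRESSOR_LOG_LEVEL hands control to env_logger's usual filter syntax (the same format as RUST_LOG), so something like COMPRESSOR_LOG_LEVEL=synapse_compress_state=trace should also surface the trace messages added in src/database.rs.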
