
Replace printing with logging #67


Merged
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions Cargo.toml
@@ -18,6 +18,8 @@ postgres-openssl = "0.5.0"
rand = "0.8.0"
rayon = "1.3.0"
string_cache = "0.8.0"
env_logger = "0.9.0"
log = "0.4.14"

[dependencies.state-map]
git = "https://github.com/matrix-org/rust-matrix-state-map"
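As background (not part of the diff): the two new dependencies split the work between the `log` facade, whose macros the library code calls, and the `env_logger` backend, which turns those calls into output. A minimal sketch of the pairing, assuming the versions added above:

```rust
// Minimal sketch: `log` provides the macros, `env_logger` provides the backend.
use log::{debug, info};

fn main() {
    // env_logger reads the RUST_LOG environment variable to decide what is emitted.
    env_logger::init();

    info!("printed when RUST_LOG=info (or a more verbose level) is set");
    debug!("printed only at RUST_LOG=debug or RUST_LOG=trace");
}
```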
8 changes: 4 additions & 4 deletions compressor_integration_tests/src/lib.rs
@@ -357,9 +357,9 @@ fn functions_are_self_consistent()

pub fn setup_logger() {
// setup the logger for the auto_compressor
// The default can be overwritten with COMPRESSOR_LOG_LEVEL
// The default can be overwritten with RUST_LOG
// see the README for more information <--- TODO
if env::var("COMPRESSOR_LOG_LEVEL").is_err() {
if env::var("RUST_LOG").is_err() {
let mut log_builder = env_logger::builder();
// set is_test(true) so that the output is hidden by cargo test (unless the test fails)
log_builder.is_test(true);
@@ -370,8 +370,8 @@ pub fn setup_logger() {
// use try_init() in case the logger has been setup by some previous test
let _ = log_builder.try_init();
} else {
// If COMPRESSOR_LOG_LEVEL was set then use that
let mut log_builder = env_logger::Builder::from_env("COMPRESSOR_LOG_LEVEL");
// If RUST_LOG was set then use that
let mut log_builder = env_logger::Builder::from_env("RUST_LOG");
// set is_test(true) so that the output is hidden by cargo test (unless the test fails)
log_builder.is_test(true);
// use try_init() in case the logger has been setup by some previous test
@@ -2,13 +2,14 @@ use auto_compressor::state_saving::{
connect_to_database, create_tables_if_needed, read_room_compressor_state,
write_room_compressor_state,
};
use compressor_integration_tests::{clear_compressor_state, DB_URL};
use compressor_integration_tests::{clear_compressor_state, setup_logger, DB_URL};
use serial_test::serial;
use synapse_compress_state::Level;

#[test]
#[serial(db)]
fn write_then_read_state_gives_correct_results() {
setup_logger();
let mut client = connect_to_database(DB_URL).unwrap();
create_tables_if_needed(&mut client).unwrap();
clear_compressor_state();
@@ -7,7 +7,7 @@ use compressor_integration_tests::{
compressed_3_3_from_0_to_13_with_state, line_segments_with_state, line_with_state,
structure_from_edges_with_state,
},
DB_URL,
setup_logger, DB_URL,
};
use serial_test::serial;
use synapse_compress_state::{run, Config};
@@ -22,6 +22,7 @@ use synapse_compress_state::{run, Config};
#[test]
#[serial(db)]
fn run_succeeds_without_crashing() {
setup_logger();
// This starts with the following structure
//
// 0-1-2-3-4-5-6-7-8-9-10-11-12-13
@@ -67,6 +68,7 @@
#[test]
#[serial(db)]
fn changes_commited_if_no_min_saved_rows() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -132,6 +134,7 @@
#[test]
#[serial(db)]
fn changes_commited_if_min_saved_rows_exceeded() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -197,6 +200,7 @@ fn changes_commited_if_min_saved_rows_exceeded() {
#[test]
#[serial(db)]
fn changes_not_commited_if_fewer_than_min_saved_rows() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -263,6 +267,7 @@ fn changes_not_commited_if_fewer_than_min_saved_rows() {
#[test]
#[should_panic(expected = "Error connecting to the database:")]
fn run_panics_if_invalid_db_url() {
setup_logger();
// set up the config options
let db_url = "thisIsAnInvalidURL".to_string();
let room_id = "room1".to_string();
@@ -298,6 +303,7 @@ fn run_panics_if_invalid_db_url() {
#[test]
#[serial(db)]
fn run_only_affects_given_room_id() {
setup_logger();
// build room1 stuff up
// This starts with the following structure
//
@@ -374,6 +380,7 @@
#[test]
#[serial(db)]
fn run_respects_groups_to_compress() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -456,6 +463,7 @@ fn run_respects_groups_to_compress() {
#[test]
#[serial(db)]
fn run_is_idempotent_when_run_on_whole_room() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
@@ -2,7 +2,7 @@ use compressor_integration_tests::{
add_contents_to_database, database_collapsed_states_match_map, database_structure_matches_map,
empty_database,
map_builder::{compressed_3_3_from_0_to_13_with_state, line_segments_with_state},
DB_URL,
setup_logger, DB_URL,
};
use serial_test::serial;
use synapse_compress_state::{continue_run, Level};
@@ -13,6 +13,7 @@ use synapse_compress_state::{continue_run, Level};
#[test]
#[serial(db)]
fn continue_run_called_twice_same_as_run() {
setup_logger();
// This starts with the following structure
//
// 0-1-2 3-4-5 6-7-8 9-10-11 12-13
11 changes: 5 additions & 6 deletions src/database.rs
@@ -13,6 +13,7 @@
// limitations under the License.

use indicatif::{ProgressBar, ProgressStyle};
use log::{debug, trace};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use postgres::{fallible_iterator::FallibleIterator, types::ToSql, Client};
use postgres_openssl::MakeTlsConnector;
@@ -40,7 +41,6 @@ use super::StateGroupEntry;
/// * `max_state_group` - If specified, then only fetch the entries for state
/// groups lower than or equal to this number.
/// * 'groups_to_compress' - The number of groups to get from the database before stopping

pub fn get_data_from_db(
db_url: &str,
room_id: &str,
@@ -59,7 +59,6 @@ pub fn get_data_from_db(
// Search for the group id of the groups_to_compress'th group after min_state_group
// If this is saved, then the compressor can continue by having min_state_group being
// set to this maximum. If no such group can be found then return None.

let max_group_found = find_max_group(
&mut client,
room_id,
@@ -222,7 +221,7 @@ fn load_map_from_db(
max_group_found,
));

println!("Got initial state from database. Checking for any missing state groups...");
debug!("Got initial state from database. Checking for any missing state groups...");

// Due to reasons some of the state groups appear in the edges table, but
// not in the state_groups_state table.
@@ -251,14 +250,14 @@
.collect();

if missing_sgs.is_empty() {
// println!("No missing state groups");
trace!("No missing state groups");
break;
}

missing_sgs.sort_unstable();
missing_sgs.dedup();

// println!("Missing {} state groups", missing_sgs.len());
trace!("Missing {} state groups", missing_sgs.len());

// find state groups not picked up already and add them to the map
let map = get_missing_from_db(client, &missing_sgs, min_state_group, max_group_found);
@@ -535,7 +534,7 @@ pub fn send_changes_to_db(

let mut client = Client::connect(db_url, connector).unwrap();

println!("Writing changes...");
debug!("Writing changes...");

// setup the progress bar
let pb: ProgressBar;
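For orientation (an illustrative sketch, not code from this PR): the one-off progress messages in this file now go to `debug!`, while the per-loop diagnostics that used to be commented-out `println!`s become `trace!`, so they stay silent unless a trace-level filter is enabled:

```rust
// Illustrative sketch of the debug!/trace! split used above (log 0.4 macros).
use log::{debug, trace};

fn report_missing(missing_sgs: &[i64]) {
    debug!("Checking for any missing state groups...");

    if missing_sgs.is_empty() {
        // Only emitted when e.g. RUST_LOG=synapse_compress_state=trace is set.
        trace!("No missing state groups");
    } else {
        trace!("Missing {} state groups", missing_sgs.len());
    }
}
```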
41 changes: 21 additions & 20 deletions src/lib.rs
@@ -20,6 +20,7 @@
// of arguments - this hopefully doesn't make the code unclear
// #[allow(clippy::too_many_arguments)] is therefore used around some functions

use log::{info, warn};
use pyo3::{exceptions, prelude::*};

use clap::{crate_authors, crate_description, crate_name, crate_version, value_t, App, Arg};
@@ -290,7 +291,7 @@ impl Config {

pub fn run(mut config: Config) {
// First we need to get the current state groups
println!("Fetching state from DB for room '{}'...", config.room_id);
info!("Fetching state from DB for room '{}'...", config.room_id);

let (state_group_map, max_group_found) = database::get_data_from_db(
&config.db_url,
@@ -301,19 +302,19 @@ pub fn run(mut config: Config) {
)
.unwrap_or_else(|| panic!("No state groups found within this range"));

println!("Fetched state groups up to {}", max_group_found);
info!("Fetched state groups up to {}", max_group_found);

println!("Number of state groups: {}", state_group_map.len());
info!("Number of state groups: {}", state_group_map.len());

let original_summed_size = state_group_map
.iter()
.fold(0, |acc, (_, v)| acc + v.state_map.len());

println!("Number of rows in current table: {}", original_summed_size);
info!("Number of rows in current table: {}", original_summed_size);

// Now we actually call the compression algorithm.

println!("Compressing state...");
info!("Compressing state...");

let compressor = Compressor::compress(&state_group_map, &config.level_sizes.0);

@@ -327,22 +328,22 @@

let ratio = (compressed_summed_size as f64) / (original_summed_size as f64);

println!(
info!(
"Number of rows after compression: {} ({:.2}%)",
compressed_summed_size,
ratio * 100.
);

println!("Compression Statistics:");
println!(
info!("Compression Statistics:");
info!(
" Number of forced resets due to lacking prev: {}",
compressor.stats.resets_no_suitable_prev
);
println!(
info!(
" Number of compressed rows caused by the above: {}",
compressor.stats.resets_no_suitable_prev_size
);
println!(
info!(
" Number of state groups changed: {}",
compressor.stats.state_groups_changed
);
@@ -352,14 +353,14 @@
}

if ratio > 1.0 {
println!("This compression would not remove any rows. Exiting.");
warn!("This compression would not remove any rows. Exiting.");
return;
}

if let Some(min) = config.min_saved_rows {
let saving = (original_summed_size - compressed_summed_size) as i32;
if saving < min {
println!(
warn!(
"Only {} rows would be saved by this compression. Skipping output.",
saving
);
@@ -485,7 +486,7 @@ fn output_sql(
return;
}

println!("Writing changes...");
info!("Writing changes...");

let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
@@ -559,7 +560,7 @@ pub fn continue_run(
let ratio = (new_num_rows as f64) / (original_num_rows as f64);

if ratio > 1.0 {
println!("This compression would not remove any rows. Aborting.");
warn!("This compression would not remove any rows. Aborting.");
return Some(ChunkStats {
new_level_info: compressor.get_level_info(),
last_compressed_group: max_group_found,
@@ -600,7 +601,7 @@ fn check_that_maps_match(
old_map: &BTreeMap<i64, StateGroupEntry>,
new_map: &BTreeMap<i64, StateGroupEntry>,
) {
println!("Checking that state maps match...");
info!("Checking that state maps match...");

let pb: ProgressBar;
if cfg!(feature = "no-progress-bars") {
@@ -625,10 +626,10 @@
pb.inc(1);

if expected != actual {
println!("State Group: {}", sg);
println!("Expected: {:#?}", expected);
println!("actual: {:#?}", actual);
Err(format!("States for group {} do not match", sg))
Err(format!(
"States for group {} do not match. Expected {:#?}, found {:#?}",
sg, expected, actual
))
} else {
Ok(())
}
@@ -637,7 +638,7 @@

pb.finish();

println!("New state map matches old one");
info!("New state map matches old one");
}

/// Gets the full state for a given group from the map (of deltas)
19 changes: 19 additions & 0 deletions src/main.rs
@@ -20,8 +20,27 @@
#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;

use log::LevelFilter;
use std::env;
use std::io::Write;

use synapse_compress_state as comp_state;

fn main() {
// setup the logger
// The default can be overwritten with RUST_LOG
// see the README for more information <---- TODO
if env::var("RUST_LOG").is_err() {
let mut log_builder = env_logger::builder();
// Only output the log message (and not the prefixed timestamp etc.)
log_builder.format(|buf, record| writeln!(buf, "{}", record.args()));
// By default print all of the debugging messages from this library
log_builder.filter_module("synapse_compress_state", LevelFilter::Debug);
log_builder.init();
} else {
// If RUST_LOG was set then use that
env_logger::Builder::from_env("RUST_LOG").init();
}

comp_state::run(comp_state::Config::parse_arguments());
}
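As a closing note (a hedged sketch, not part of the diff): the default branch above amounts to configuring `env_logger` to print only the message body, which keeps the CLI output identical to the old `println!` lines, while defaulting this crate to debug-level verbosity. Condensed into one chained builder call, using the same env_logger 0.9 API as `main()`:

```rust
// Sketch of the default branch as a single chained builder (env_logger 0.9).
use std::io::Write;

use log::LevelFilter;

fn init_plain_logger() {
    env_logger::builder()
        // Print only the message body, matching the old println! output.
        .format(|buf, record| writeln!(buf, "{}", record.args()))
        // Show debug and above from this library by default.
        .filter_module("synapse_compress_state", LevelFilter::Debug)
        .init();
}
```

When `RUST_LOG` is set, the `else` branch hands control to the standard `env_logger` filter syntax instead, e.g. `RUST_LOG=synapse_compress_state=trace,postgres=warn`.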
}