diff --git a/configure.py b/configure.py index 8b2dad52..80f453a6 100644 --- a/configure.py +++ b/configure.py @@ -123,10 +123,10 @@ def vcpkg_export(): exit(1) pkgs = vcpkg_requirements() - out = "vcpkg-export-%s" % odm_version().replace(".", "") + out = "vcpkg-export-%s" % get_version().replace(".", "") run("vcpkg\\vcpkg export %s --output=%s --zip" % (" ".join(pkgs), out)) -def odm_version(): +def get_version(): with open("VERSION") as f: return f.read().split("\n")[0].strip() diff --git a/opendm/ai.py b/opendm/ai.py index 3f29a79d..7ac09b11 100644 --- a/opendm/ai.py +++ b/opendm/ai.py @@ -41,7 +41,7 @@ def get_model(namespace, url, version, name = "model.onnx"): # Check if we need to download it model_file = os.path.join(versioned_dir, name) if not os.path.isfile(model_file): - log.ODM_INFO("Downloading AI model from %s ..." % url) + log.INFO("Downloading AI model from %s ..." % url) last_update = 0 @@ -51,23 +51,23 @@ def callback(progress): time_has_elapsed = time.time() - last_update >= 2 if time_has_elapsed or int(progress) == 100: - log.ODM_INFO("Downloading: %s%%" % int(progress)) + log.INFO("Downloading: %s%%" % int(progress)) last_update = time.time() try: downloaded_file = download(url, versioned_dir, progress_callback=callback) except Exception as e: - log.ODM_WARNING("Cannot download %s: %s" % (url, str(e))) + log.WARNING("Cannot download %s: %s" % (url, str(e))) return None if os.path.basename(downloaded_file).lower().endswith(".zip"): - log.ODM_INFO("Extracting %s ..." % downloaded_file) + log.INFO("Extracting %s ..." 
% downloaded_file) with zipfile.ZipFile(downloaded_file, 'r') as z: z.extractall(versioned_dir) os.remove(downloaded_file) if not os.path.isfile(model_file): - log.ODM_WARNING("Cannot find %s (is the URL to the AI model correct?)" % model_file) + log.WARNING("Cannot find %s (is the URL to the AI model correct?)" % model_file) return None else: return model_file diff --git a/opendm/align.py b/opendm/align.py index c3f8a520..349cb9a0 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -56,7 +56,7 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): # Check if we need to reproject align file input_crs = get_point_cloud_crs(input_laz) - log.ODM_INFO("Input CRS: %s" % input_crs) + log.INFO("Input CRS: %s" % input_crs) _, ext = os.path.splitext(align_file) repr_func = None @@ -68,16 +68,16 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): align_crs = get_point_cloud_crs(align_file) repr_func = reproject_point_cloud else: - log.ODM_WARNING("Unsupported alignment file: %s" % align_file) + log.WARNING("Unsupported alignment file: %s" % align_file) return to_delete = [] try: - log.ODM_INFO("Align CRS: %s" % align_crs) + log.INFO("Align CRS: %s" % align_crs) if input_crs != align_crs: # Reprojection needed - log.ODM_INFO("Reprojecting %s to %s" % (align_file, input_crs)) + log.INFO("Reprojecting %s to %s" % (align_file, input_crs)) align_file = repr_func(align_file, input_crs) to_delete.append(align_file) @@ -85,10 +85,10 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): fnd_obj, aoi_obj = codem.preprocess(conf) fnd_obj.prep() aoi_obj.prep() - log.ODM_INFO("Aligning reconstruction to %s" % align_file) - log.ODM_INFO("Coarse registration...") + log.INFO("Aligning reconstruction to %s" % align_file) + log.INFO("Coarse registration...") dsm_reg = codem.coarse_registration(fnd_obj, aoi_obj, conf) - log.ODM_INFO("Fine registration...") + log.INFO("Fine registration...") icp_reg = codem.fine_registration(fnd_obj, aoi_obj, dsm_reg, 
conf) app_reg = codem.registration.ApplyRegistration( diff --git a/opendm/arghelpers.py b/opendm/arghelpers.py index d3587601..ec210b46 100644 --- a/opendm/arghelpers.py +++ b/opendm/arghelpers.py @@ -35,7 +35,7 @@ def save_opts(opts_json, args): with open(opts_json, "w", encoding='utf-8') as f: f.write(json.dumps(args_to_dict(args))) except Exception as e: - log.ODM_WARNING("Cannot save options to %s: %s" % (opts_json, str(e))) + log.WARNING("Cannot save options to %s: %s" % (opts_json, str(e))) def compare_args(opts_json, args, rerun_stages): if not os.path.isfile(opts_json): diff --git a/opendm/bgfilter.py b/opendm/bgfilter.py index 5285535c..95f73228 100644 --- a/opendm/bgfilter.py +++ b/opendm/bgfilter.py @@ -19,12 +19,12 @@ class BgFilter(): def __init__(self, model): self.model = model - log.ODM_INFO(' ?> Using provider %s' % provider) + log.INFO(' ?> Using provider %s' % provider) self.load_model() def load_model(self): - log.ODM_INFO(' -> Loading the model') + log.INFO(' -> Loading the model') self.session = ort.InferenceSession(self.model, providers=[provider]) diff --git a/opendm/camera.py b/opendm/camera.py index ee78caff..9687546e 100644 --- a/opendm/camera.py +++ b/opendm/camera.py @@ -64,7 +64,7 @@ def get_opensfm_camera_models(cameras): for k in keys: if not k in valid_fields: camera.pop(k) - log.ODM_WARNING("Invalid camera key ignored: %s" % k) + log.WARNING("Invalid camera key ignored: %s" % k) result[osfm_camera_id] = camera return result diff --git a/opendm/cogeo.py b/opendm/cogeo.py index 98c74071..732226af 100644 --- a/opendm/cogeo.py +++ b/opendm/cogeo.py @@ -15,10 +15,10 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLAT """ if not os.path.isfile(src_path): - log.ODM_WARNING("Cannot convert to cogeo: %s (file does not exist)" % src_path) + log.WARNING("Cannot convert to cogeo: %s (file does not exist)" % src_path) return False - log.ODM_INFO("Optimizing %s as Cloud Optimized GeoTIFF" % src_path) + 
log.INFO("Optimizing %s as Cloud Optimized GeoTIFF" % src_path) tmpfile = io.related_file_path(src_path, postfix='_cogeo') @@ -47,7 +47,7 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLAT "--config GDAL_NUM_THREADS {threads} " "\"{src_path}\" \"{tmpfile}\" ".format(**kwargs)) except Exception as e: - log.ODM_WARNING("Cannot create Cloud Optimized GeoTIFF: %s" % str(e)) + log.WARNING("Cannot create Cloud Optimized GeoTIFF: %s" % str(e)) if os.path.isfile(tmpfile): shutil.move(src_path, swapfile) # Move to swap location @@ -55,7 +55,7 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLAT try: shutil.move(tmpfile, src_path) except IOError as e: - log.ODM_WARNING("Cannot move %s to %s: %s" % (tmpfile, src_path, str(e))) + log.WARNING("Cannot move %s to %s: %s" % (tmpfile, src_path, str(e))) shutil.move(swapfile, src_path) # Attempt to restore if os.path.isfile(swapfile): diff --git a/opendm/concurrency.py b/opendm/concurrency.py index dfcf58f4..78af7e70 100644 --- a/opendm/concurrency.py +++ b/opendm/concurrency.py @@ -92,7 +92,7 @@ def stop_workers(): if error is not None and single_thread_fallback: # Try to reprocess using a single thread # in case this was a memory error - log.ODM_WARNING("Failed to run process in parallel, retrying with a single thread...") + log.WARNING("Failed to run process in parallel, retrying with a single thread...") use_single_thread = True else: use_single_thread = True diff --git a/opendm/config.py b/opendm/config.py index 647fe949..125c3271 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -175,7 +175,7 @@ def config(argv=None, parser=None): yaml_defaults = {k.replace('-', '_'): v for k, v in yaml_defaults.items()} parser.set_defaults(**yaml_defaults) except Exception as e: - log.ODM_WARNING(f"Could not load settings from {context.settings_path}: {e}") + log.WARNING(f"Could not load settings from {context.settings_path}: {e}") parser.add_argument('--project-path', 
metavar='', @@ -902,37 +902,37 @@ def config(argv=None, parser=None): for p in unknown: if p in DEPRECATED: - log.ODM_WARNING("%s is no longer a valid argument and will be ignored!" % p) + log.WARNING("%s is no longer a valid argument and will be ignored!" % p) # check that the project path setting has been set properly if not args.project_path: - log.ODM_ERROR('You need to set the project path in the ' + log.ERROR('You need to set the project path in the ' 'settings.yaml file before you can run ODM, ' 'or use `--project-path `. Run `python3 ' 'run.py --help` for more information. ') sys.exit(1) if args.fast_orthophoto: - log.ODM_INFO('Fast orthophoto is turned on, automatically setting --skip-3dmodel') + log.INFO('Fast orthophoto is turned on, automatically setting --skip-3dmodel') args.skip_3dmodel = True if args.dtm and not args.pc_classify: - log.ODM_INFO("DTM is turned on, automatically turning on point cloud classification") + log.INFO("DTM is turned on, automatically turning on point cloud classification") args.pc_classify = True if args.skip_3dmodel and args.use_3dmesh: - log.ODM_WARNING('--skip-3dmodel is set, but so is --use-3dmesh. --skip-3dmodel will be ignored.') + log.WARNING('--skip-3dmodel is set, but so is --use-3dmesh. --skip-3dmodel will be ignored.') args.skip_3dmodel = False if args.orthophoto_cutline and not args.crop: - log.ODM_WARNING("--orthophoto-cutline is set, but --crop is not. --crop will be set to 0.01") + log.WARNING("--orthophoto-cutline is set, but --crop is not. 
--crop will be set to 0.01") args.crop = 0.01 if args.sm_cluster: try: Node.from_url(args.sm_cluster).info() except exceptions.NodeConnectionError as e: - log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e)) + log.ERROR("Cluster node seems to be offline: %s" % str(e)) sys.exit(1) return args diff --git a/opendm/cropper.py b/opendm/cropper.py index 7637bfb6..2ed4ee38 100644 --- a/opendm/cropper.py +++ b/opendm/cropper.py @@ -23,10 +23,10 @@ def path(self, suffix): @staticmethod def crop(gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options=[]): if not os.path.exists(gpkg_path) or not os.path.exists(geotiff_path): - log.ODM_WARNING("Either {} or {} does not exist, will skip cropping.".format(gpkg_path, geotiff_path)) + log.WARNING("Either {} or {} does not exist, will skip cropping.".format(gpkg_path, geotiff_path)) return geotiff_path - log.ODM_INFO("Cropping %s" % geotiff_path) + log.INFO("Cropping %s" % geotiff_path) # Rename original file # path/to/odm_orthophoto.tif --> path/to/odm_orthophoto.original.tif @@ -64,7 +64,7 @@ def crop(gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options os.remove(original_geotiff) except Exception as e: - log.ODM_WARNING('Something went wrong while cropping: {}'.format(e)) + log.WARNING('Something went wrong while cropping: {}'.format(e)) # Revert rename os.replace(original_geotiff, geotiff_path) @@ -131,7 +131,7 @@ def create_bounds_geojson(self, pointcloud_path, buffer_distance = 0, decimation @return filename to GeoJSON containing the polygon """ if not os.path.exists(pointcloud_path): - log.ODM_WARNING('Point cloud does not exist, cannot generate bounds {}'.format(pointcloud_path)) + log.WARNING('Point cloud does not exist, cannot generate bounds {}'.format(pointcloud_path)) return '' # Do decimation prior to extracting boundary information @@ -143,7 +143,7 @@ def create_bounds_geojson(self, pointcloud_path, buffer_distance = 0, decimation "--filters.decimation.step={} 
".format(pointcloud_path, decimated_pointcloud_path, decimation_step)) if not os.path.exists(decimated_pointcloud_path): - log.ODM_WARNING('Could not decimate point cloud, thus cannot generate GPKG bounds {}'.format(decimated_pointcloud_path)) + log.WARNING('Could not decimate point cloud, thus cannot generate GPKG bounds {}'.format(decimated_pointcloud_path)) return '' # Use PDAL to dump boundary information @@ -188,7 +188,7 @@ def create_bounds_geojson(self, pointcloud_path, buffer_distance = 0, decimation if tmp.Area() > 0: convexhull = tmp else: - log.ODM_WARNING("Very small crop area detected, we will not smooth it.") + log.WARNING("Very small crop area detected, we will not smooth it.") # Save to a new file bounds_geojson_path = self.path('bounds.geojson') @@ -228,7 +228,7 @@ def create_bounds_gpkg(self, pointcloud_path, buffer_distance = 0, decimation_st @return filename to Geopackage containing the polygon """ if not os.path.exists(pointcloud_path): - log.ODM_WARNING('Point cloud does not exist, cannot generate GPKG bounds {}'.format(pointcloud_path)) + log.WARNING('Point cloud does not exist, cannot generate GPKG bounds {}'.format(pointcloud_path)) return '' diff --git a/opendm/cutline.py b/opendm/cutline.py index a3a2f02a..f23a02a6 100644 --- a/opendm/cutline.py +++ b/opendm/cutline.py @@ -34,12 +34,12 @@ def write_raster(data, file): def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1): if io.file_exists(orthophoto_file) and io.file_exists(crop_area_file): - log.ODM_INFO("Computing cutline") + log.INFO("Computing cutline") scale = max(0.0001, min(1, scale)) scaled_orthophoto = None if scale < 1: - log.ODM_INFO("Scaling orthophoto to %s%% to compute cutline" % (scale * 100)) + log.INFO("Scaling orthophoto to %s%% to compute cutline" % (scale * 100)) scaled_orthophoto = io.related_file_path(orthophoto_file, postfix=".scaled") # Scale orthophoto before computing cutline @@ -65,12 +65,12 @@ def 
compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc line_ver_offset = int(height / number_lines) if line_hor_offset <= 2 or line_ver_offset <= 2: - log.ODM_WARNING("Cannot compute cutline, orthophoto is too small (%sx%spx)" % (width, height)) + log.WARNING("Cannot compute cutline, orthophoto is too small (%sx%spx)" % (width, height)) return crop_f = fiona.open(crop_area_file, 'r') if len(crop_f) == 0: - log.ODM_WARNING("Crop area is empty, cannot compute cutline") + log.WARNING("Crop area is empty, cannot compute cutline") return crop_poly = shape(crop_f[1]['geometry']) @@ -82,7 +82,7 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc edges = canny(rast) def compute_linestrings(direction): - log.ODM_INFO("Computing %s cutlines" % direction) + log.INFO("Computing %s cutlines" % direction) # Initialize cost map cost_map = np.full((height, width), 1, dtype=np.float32) @@ -129,7 +129,7 @@ def compute_linestrings(direction): # Generate polygons and keep only those inside the crop area - log.ODM_INFO("Generating polygons... this could take a bit.") + log.INFO("Generating polygons... 
this could take a bit.") polygons = [] for p in polygonize(unary_union(linestrings)): if crop_poly.contains(p): @@ -137,10 +137,10 @@ def compute_linestrings(direction): # This should never happen if len(polygons) == 0: - log.ODM_WARNING("No polygons, cannot compute cutline") + log.WARNING("No polygons, cannot compute cutline") return - log.ODM_INFO("Merging polygons") + log.INFO("Merging polygons") cutline_polygons = unary_union(polygons) if not hasattr(cutline_polygons, 'geoms'): @@ -154,7 +154,7 @@ def compute_linestrings(direction): max_area = p.area largest_cutline = p - log.ODM_INFO("Largest cutline found: %s m^2" % max_area) + log.INFO("Largest cutline found: %s m^2" % max_area) meta = { 'crs': fiona.crs.CRS.from_wkt(f.crs.to_wkt()), @@ -175,10 +175,10 @@ def compute_linestrings(direction): 'properties': {} }) f.close() - log.ODM_INFO("Wrote %s" % destination) + log.INFO("Wrote %s" % destination) # Cleanup if scaled_orthophoto is not None and os.path.exists(scaled_orthophoto): os.remove(scaled_orthophoto) else: - log.ODM_WARNING("We've been asked to compute cutline, but either %s or %s is missing. Skipping..." % (orthophoto_file, crop_area_file)) + log.WARNING("We've been asked to compute cutline, but either %s or %s is missing. Skipping..." 
% (orthophoto_file, crop_area_file)) diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 2b7a7606..45b0251e 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -39,9 +39,9 @@ def classify(lasFile, scalar, slope, threshold, window): try: pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window) except: - log.ODM_WARNING("Error creating classified file %s" % lasFile) + log.WARNING("Error creating classified file %s" % lasFile) - log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start)) + log.INFO('Created %s in %s' % (lasFile, datetime.now() - start)) return lasFile error = None @@ -89,7 +89,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'] if len(tiles) == 0: raise system.ExitException("No DEM tiles were generated, something went wrong") - log.ODM_INFO("Generated %s tiles" % len(tiles)) + log.INFO("Generated %s tiles" % len(tiles)) # Sort tiles by decreasing radius tiles.sort(key=lambda t: float(t['radius']), reverse=True) @@ -177,12 +177,12 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'] for t in tiles: if os.path.exists(t['filename']): os.remove(t['filename']) - log.ODM_INFO('Completed %s in %s' % (output_file, datetime.now() - start)) + log.INFO('Completed %s in %s' % (output_file, datetime.now() - start)) def compute_euclidean_map(geotiff_path, output_path, overwrite=False): if not os.path.exists(geotiff_path): - log.ODM_WARNING("Cannot compute euclidean map (file does not exist: %s)" % geotiff_path) + log.WARNING("Cannot compute euclidean map (file does not exist: %s)" % geotiff_path) return nodata = -9999 @@ -193,7 +193,7 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False): if os.path.isfile(output_path): os.remove(output_path) - log.ODM_INFO("Computing euclidean distance: %s" % output_path) + log.INFO("Computing euclidean distance: %s" % output_path) if gdal_proximity is not None: try: @@ -204,17 +204,17 
@@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False): '-co', 'COMPRESS=DEFLATE', ]) except Exception as e: - log.ODM_WARNING("Cannot compute euclidean distance: %s" % str(e)) + log.WARNING("Cannot compute euclidean distance: %s" % str(e)) if os.path.exists(output_path): return output_path else: - log.ODM_WARNING("Cannot compute euclidean distance file: %s" % output_path) + log.WARNING("Cannot compute euclidean distance file: %s" % output_path) else: - log.ODM_WARNING("Cannot compute euclidean map, gdal_proximity is missing") + log.WARNING("Cannot compute euclidean map, gdal_proximity is missing") else: - log.ODM_INFO("Found a euclidean distance map: %s" % output_path) + log.INFO("Found a euclidean distance map: %s" % output_path) return output_path @@ -239,7 +239,7 @@ def median_smoothing(geotiff_path, output_path, window_size=512, num_workers=1, '--co BIGTIFF=IF_SAFER ' '--co COMPRESS=DEFLATE '.format(**kwargs), env_vars={'OMP_NUM_THREADS': num_workers}) - log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start)) + log.INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start)) return output_path diff --git a/opendm/dem/merge.py b/opendm/dem/merge.py index fdeef93a..af0e32c9 100644 --- a/opendm/dem/merge.py +++ b/opendm/dem/merge.py @@ -26,12 +26,12 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_ existing_dems = [] for dem in input_dems: if not io.file_exists(dem): - log.ODM_WARNING("%s does not exist. Will skip from merged DEM." % dem) + log.WARNING("%s does not exist. Will skip from merged DEM." 
% dem) continue existing_dems.append(dem) if len(existing_dems) == 0: - log.ODM_WARNING("No input DEMs, skipping euclidean merge.") + log.WARNING("No input DEMs, skipping euclidean merge.") return with rasterio.open(existing_dems[0]) as first: @@ -45,7 +45,7 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_ if eumap and io.file_exists(eumap): inputs.append((dem, eumap)) - log.ODM_INFO("%s valid DEM rasters to merge" % len(inputs)) + log.INFO("%s valid DEM rasters to merge" % len(inputs)) sources = [(rasterio.open(d), rasterio.open(e)) for d,e in inputs] @@ -64,7 +64,7 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_ if src_d.profile["count"] != 1 or src_e.profile["count"] != 1: raise ValueError("Inputs must be 1-band rasters") dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys) - log.ODM_INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) + log.INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) output_transform = Affine.translation(dst_w, dst_n) output_transform *= Affine.scale(res[0], -res[1]) @@ -76,8 +76,8 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_ # Adjust bounds to fit. 
dst_e, dst_s = output_transform * (output_width, output_height) - log.ODM_INFO("Output width: %d, height: %d" % (output_width, output_height)) - log.ODM_INFO("Adjusted bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) + log.INFO("Output width: %d, height: %d" % (output_width, output_height)) + log.INFO("Adjusted bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) profile["transform"] = output_transform profile["height"] = output_height diff --git a/opendm/dem/pdal.py b/opendm/dem/pdal.py index 35a33de4..9bfa4452 100644 --- a/opendm/dem/pdal.py +++ b/opendm/dem/pdal.py @@ -173,7 +173,7 @@ def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window): def merge_point_clouds(input_files, output_file): if len(input_files) == 0: - log.ODM_WARNING("Cannot merge point clouds, no point clouds to merge.") + log.WARNING("Cannot merge point clouds, no point clouds to merge.") return cmd = [ diff --git a/opendm/entwine.py b/opendm/entwine.py index fbfddbf9..900268e7 100644 --- a/opendm/entwine.py +++ b/opendm/entwine.py @@ -11,18 +11,18 @@ def build(input_point_cloud_files, output_path, max_concurrency=8, rerun=False): num_files = len(input_point_cloud_files) if num_files == 0: - log.ODM_WARNING("No input point cloud files to process") + log.WARNING("No input point cloud files to process") return tmpdir = io.related_file_path(output_path, postfix="-tmp") def dir_cleanup(): if io.dir_exists(output_path): - log.ODM_WARNING("Removing previous EPT directory: %s" % output_path) + log.WARNING("Removing previous EPT directory: %s" % output_path) shutil.rmtree(output_path) if io.dir_exists(tmpdir): - log.ODM_WARNING("Removing previous EPT temp directory: %s" % tmpdir) + log.WARNING("Removing previous EPT temp directory: %s" % tmpdir) shutil.rmtree(tmpdir) if rerun: @@ -32,7 +32,7 @@ def dir_cleanup(): try: build_entwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=max_concurrency) except Exception as e: - log.ODM_WARNING("Cannot build EPT using entwine 
(%s), attempting with untwine..." % str(e)) + log.WARNING("Cannot build EPT using entwine (%s), attempting with untwine..." % str(e)) dir_cleanup() build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=max_concurrency) @@ -64,21 +64,21 @@ def build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency= def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False): if len(input_point_cloud_files) == 0: - logger.ODM_WARNING("Cannot build COPC, no input files") + log.WARNING("Cannot build COPC, no input files") return base_path, ext = os.path.splitext(output_file) tmpdir = io.related_file_path(base_path, postfix="-tmp") if os.path.exists(tmpdir): - log.ODM_WARNING("Removing previous directory %s" % tmpdir) + log.WARNING("Removing previous directory %s" % tmpdir) shutil.rmtree(tmpdir) cleanup = [tmpdir] if convert_rgb_8_to_16: tmpdir16 = io.related_file_path(base_path, postfix="-tmp16") if os.path.exists(tmpdir16): - log.ODM_WARNING("Removing previous directory %s" % tmpdir16) + log.WARNING("Removing previous directory %s" % tmpdir16) shutil.rmtree(tmpdir16) os.makedirs(tmpdir16, exist_ok=True) cleanup.append(tmpdir16) @@ -98,7 +98,7 @@ def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False): converted.append(out_16) except Exception as e: - log.ODM_WARNING("Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s" % str(e)) + log.WARNING("Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s" % str(e)) ok = False break if ok: diff --git a/opendm/exiftool.py b/opendm/exiftool.py index 7dfe087a..0516bedf 100644 --- a/opendm/exiftool.py +++ b/opendm/exiftool.py @@ -42,13 +42,13 @@ def extract_raw_thermal_image_data(image_path): raise Exception("Invalid JSON (not a list)") except Exception as e: - log.ODM_WARNING("Cannot extract tags using exiftool: %s" % str(e)) + log.WARNING("Cannot extract tags using exiftool: %s" % 
str(e)) return {}, None finally: if os.path.isfile(tmp_file_path): os.remove(tmp_file_path) except Exception as e: - log.ODM_WARNING("Cannot create temporary file: %s" % str(e)) + log.WARNING("Cannot create temporary file: %s" % str(e)) return {}, None def unit(unit): @@ -58,7 +58,7 @@ def _convert(v): elif isinstance(v, str): if not v[-1].isnumeric(): if v[-1].upper() != unit.upper(): - log.ODM_WARNING("Assuming %s is in %s" % (v, unit)) + log.WARNING("Assuming %s is in %s" % (v, unit)) return float(v[:-1]) else: return float(v) diff --git a/opendm/gcp.py b/opendm/gcp.py index 4c6273cc..2934a168 100644 --- a/opendm/gcp.py +++ b/opendm/gcp.py @@ -38,7 +38,7 @@ def read(self): if len(parts) >= 6: self.entries.append(line) else: - log.ODM_WARNING("Malformed GCP line: %s" % line) + log.WARNING("Malformed GCP line: %s" % line) def iter_entries(self): for entry in self.entries: @@ -60,18 +60,18 @@ def check_entries(self): if coords[k] < 3: description = "insufficient" if coords[k] < 2 else "not ideal" for entry in gcps[k]: - log.ODM_WARNING(str(entry)) - log.ODM_WARNING("The number of images where the GCP %s has been tagged are %s" % (k, description)) - log.ODM_WARNING("You should tag at least %s more images" % (3 - coords[k])) - log.ODM_WARNING("=====================================") + log.WARNING(str(entry)) + log.WARNING("The number of images where the GCP %s has been tagged are %s" % (k, description)) + log.WARNING("You should tag at least %s more images" % (3 - coords[k])) + log.WARNING("=====================================") errors += 1 if len(coords) < 3: - log.ODM_WARNING("Low number of GCPs detected (%s). For best results use at least 5." % (3 - len(coords))) - log.ODM_WARNING("=====================================") + log.WARNING("Low number of GCPs detected (%s). For best results use at least 5." 
% (3 - len(coords))) + log.WARNING("=====================================") errors += 1 if errors > 0: - log.ODM_WARNING("Some issues detected with GCPs (but we're going to process this anyway)") + log.WARNING("Some issues detected with GCPs (but we're going to process this anyway)") def parse_entry(self, entry): if entry: diff --git a/opendm/geo.py b/opendm/geo.py index 7baa8c91..897cd8ee 100644 --- a/opendm/geo.py +++ b/opendm/geo.py @@ -56,7 +56,7 @@ def __init__(self, geo_path): horizontal_accuracy, vertical_accuracy, extras) else: - log.ODM_WARNING("Malformed geo line: %s" % line) + log.WARNING("Malformed geo line: %s" % line) def get_entry(self, filename): return self.entries.get(filename) diff --git a/opendm/get_image_size.py b/opendm/get_image_size.py index 679351d1..a34b74dd 100644 --- a/opendm/get_image_size.py +++ b/opendm/get_image_size.py @@ -21,7 +21,7 @@ def get_image_size(file_path, fallback_on_error=True): width, height = img.size except Exception as e: if fallback_on_error: - log.ODM_WARNING("Cannot read %s with image library, fallback to cv2: %s" % (file_path, str(e))) + log.WARNING("Cannot read %s with image library, fallback to cv2: %s" % (file_path, str(e))) img = cv2.imread(file_path) width = img.shape[1] height = img.shape[0] diff --git a/opendm/gltf.py b/opendm/gltf.py index dfbd4f11..bf509158 100644 --- a/opendm/gltf.py +++ b/opendm/gltf.py @@ -117,7 +117,7 @@ def convert_materials_to_jpeg(materials): with MemoryFile() as memfile: bands, h, w = image.shape bands = min(3, bands) - with memfile.open(driver='JPEG', jpeg_quality=90, count=bands, width=w, height=h, dtype=rasterio.dtypes.uint8) as dst: + with memfile.open(driver='JPEG', quality=90, count=bands, width=w, height=h, dtype=rasterio.dtypes.uint8) as dst: for b in range(1, min(3, bands) + 1): dst.write(image[b - 1], b) memfile.seek(0) @@ -305,6 +305,6 @@ def addBufferView(buf, target=None): os.remove(output_glb) os.rename(compressed_glb, output_glb) except Exception as e: - 
log.ODM_WARNING("Cannot compress GLB with draco: %s" % str(e)) + log.WARNING("Cannot compress GLB with draco: %s" % str(e)) diff --git a/opendm/gpu.py b/opendm/gpu.py index f701c3a9..956a54c3 100644 --- a/opendm/gpu.py +++ b/opendm/gpu.py @@ -16,10 +16,10 @@ def has_popsift_and_can_handle_texsize(width, height): compute_major, compute_minor = get_cuda_compute_version(0) if compute_major < 3 or (compute_major == 3 and compute_minor < 5): # Not supported - log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor)) + log.INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor)) return False except Exception as e: - log.ODM_WARNING(str(e)) + log.WARNING(str(e)) return False @lru_cache(maxsize=None) @@ -60,24 +60,24 @@ def get_cuda_compute_version(device_id = 0): def has_gpu(args): if gpu_disabled_by_user_env(): - log.ODM_INFO("Disabling GPU features (ODM_NO_GPU is set)") + log.INFO("Disabling GPU features (ODM_NO_GPU is set)") return False if args.no_gpu: - log.ODM_INFO("Disabling GPU features (--no-gpu is set)") + log.INFO("Disabling GPU features (--no-gpu is set)") return False if sys.platform == 'win32': nvcuda_path = os.path.join(os.environ.get('SYSTEMROOT'), 'system32', 'nvcuda.dll') if os.path.isfile(nvcuda_path): - log.ODM_INFO("CUDA drivers detected") + log.INFO("CUDA drivers detected") return True else: - log.ODM_INFO("No CUDA drivers detected") + log.INFO("No CUDA drivers detected") return False else: if shutil.which('nvidia-smi') is not None: - log.ODM_INFO("nvidia-smi detected") + log.INFO("nvidia-smi detected") return True else: - log.ODM_INFO("No nvidia-smi detected") + log.INFO("No nvidia-smi detected") return False diff --git a/opendm/gsd.py b/opendm/gsd.py index cb221329..3a6ef65a 100644 --- a/opendm/gsd.py +++ b/opendm/gsd.py @@ -87,14 +87,14 @@ def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 
0.1, gs if gsd is not None: gsd = gsd * (1 - gsd_error_estimate) * gsd_scaling if gsd > resolution or ignore_resolution: - log.ODM_WARNING('Maximum resolution set to {} * (GSD - {}%) ' + log.WARNING('Maximum resolution set to {} * (GSD - {}%) ' '({:.2f} cm / pixel, requested resolution was {:.2f} cm / pixel)' .format(gsd_scaling, gsd_error_estimate * 100, gsd, resolution)) return gsd else: return resolution else: - log.ODM_WARNING('Cannot calculate GSD, using requested resolution of {:.2f}'.format(resolution)) + log.WARNING('Cannot calculate GSD, using requested resolution of {:.2f}'.format(resolution)) return resolution @@ -127,7 +127,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False) shot_height = shot_origin[2] focal_ratio = camera.get('focal', camera.get('focal_x')) if not focal_ratio: - log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json) + log.WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." 
% reconstruction_json) return None shot_origin[2] = 0 @@ -144,7 +144,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False) if len(gsds) > 0: mean = np.mean(gsds) if mean < 0: - log.ODM_WARNING("Negative GSD estimated, this might indicate a flipped Z-axis.") + log.WARNING("Negative GSD estimated, this might indicate a flipped Z-axis.") return abs(mean) return None diff --git a/opendm/location.py b/opendm/location.py index 9f4cf2e3..71701c51 100644 --- a/opendm/location.py +++ b/opendm/location.py @@ -21,7 +21,7 @@ def extract_utm_coords(photos, images_path, output_coords_file): reference_photo = None for photo in photos: if photo.latitude is None or photo.longitude is None: - log.ODM_WARNING("GPS position not available for %s" % photo.filename) + log.WARNING("GPS position not available for %s" % photo.filename) continue if utm_zone is None: @@ -145,7 +145,7 @@ def parse_srs_header(header): else: raise RuntimeError('Could not parse coordinates. Bad SRS supplied: %s' % header) except RuntimeError as e: - log.ODM_ERROR('Uh oh! There seems to be a problem with your coordinates/GCP file.\n\n' + log.ERROR('Uh oh! There seems to be a problem with your coordinates/GCP file.\n\n' 'The line: %s\n\n' 'Is not valid. 
Projections that are valid include:\n' ' - EPSG:*****\n' diff --git a/opendm/log.py b/opendm/log.py index 34ea6939..f9aaec0a 100644 --- a/opendm/log.py +++ b/opendm/log.py @@ -7,7 +7,7 @@ import multiprocessing from functools import lru_cache -from opendm.arghelpers import args_to_dict +from opendm.arghelpers import double_quote, args_to_dict from vmem import virtual_memory if sys.platform == 'win32' or os.getenv('no_ansiesc'): @@ -16,7 +16,7 @@ OKBLUE = '' OKGREEN = '' DEFAULT = '' - WARNING = '' + WARN = '' FAIL = '' ENDC = '' else: @@ -24,14 +24,14 @@ OKBLUE = '\033[94m' OKGREEN = '\033[92m' DEFAULT = '\033[39m' - WARNING = '\033[93m' + WARN = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' lock = threading.Lock() @lru_cache(maxsize=None) -def odm_version(): +def get_version(): with open(os.path.join(os.path.dirname(__file__), "..", "VERSION")) as f: return f.read().split("\n")[0].strip() @@ -39,10 +39,10 @@ def memory(): mem = virtual_memory() return { 'total': round(mem.total / 1024 / 1024), - 'available': round(mem.available / 1024 / 1024), + 'available': round(mem.available / 1024 / 1024) } -class ODMLogger: +class Logger: def __init__(self): self.json = None self.json_output_file = None @@ -63,7 +63,9 @@ def init_json_output(self, output_files, args): self.json_output_files = output_files self.json_output_file = output_files[0] self.json = {} - self.json['odmVersion'] = odm_version() + self.json['odmVersion'] = get_version() # deprecated + self.json['engine'] = 'ODX' + self.json['version'] = get_version() self.json['memory'] = memory() self.json['cpus'] = multiprocessing.cpu_count() self.json['images'] = -1 @@ -119,7 +121,6 @@ def _log_json_end_time(self): if self.json['stages']: last_stage = self.json['stages'][-1] last_stage['endTime'] = end_time.isoformat() - # NOTE use Z replacement for Python < 3.11. 
Python 3.11+ dosesn't need this start_time = datetime.datetime.fromisoformat(last_stage['startTime'].replace("Z", "+00:00")) last_stage['totalTime'] = round((end_time - start_time).total_seconds(), 2) @@ -127,7 +128,7 @@ def info(self, msg): self.log(DEFAULT, msg, "INFO") def warning(self, msg): - self.log(WARNING, msg, "WARNING") + self.log(WARN, msg, "WARNING") def error(self, msg): self.log(FAIL, msg, "ERROR") @@ -145,9 +146,9 @@ def close(self): except Exception as e: print("Cannot write log.json: %s" % str(e)) -logger = ODMLogger() +logger = Logger() -ODM_INFO = logger.info -ODM_WARNING = logger.warning -ODM_ERROR = logger.error -ODM_EXCEPTION = logger.exception +INFO = logger.info +WARNING = logger.warning +ERROR = logger.error +EXCEPTION = logger.exception diff --git a/opendm/mesh.py b/opendm/mesh.py index 26bc847f..3283a460 100644 --- a/opendm/mesh.py +++ b/opendm/mesh.py @@ -18,9 +18,9 @@ def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution= if os.path.exists(tmp_directory): shutil.rmtree(tmp_directory) os.mkdir(tmp_directory) - log.ODM_INFO('Created temporary directory: %s' % tmp_directory) + log.INFO('Created temporary directory: %s' % tmp_directory) - log.ODM_INFO('Creating DSM for 2.5D mesh') + log.INFO('Creating DSM for 2.5D mesh') commands.create_dem( inPointCloud, @@ -54,7 +54,7 @@ def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution= def dem_to_points(inGeotiff, outPointCloud): - log.ODM_INFO('Sampling points from DSM: %s' % inGeotiff) + log.INFO('Sampling points from DSM: %s' % inGeotiff) kwargs = { 'bin': context.dem2points_path, @@ -73,7 +73,7 @@ def dem_to_points(inGeotiff, outPointCloud): def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1): - log.ODM_INFO('Creating mesh from DSM: %s' % inGeotiff) + log.INFO('Creating mesh from DSM: %s' % inGeotiff) mesh_path, mesh_filename = os.path.split(outMesh) # mesh_path = path/to @@ -107,7 +107,7 @@ def 
dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1): except Exception as e: maxConcurrency = math.floor(maxConcurrency / 2) if maxConcurrency >= 1: - log.ODM_WARNING("dem2mesh failed, retrying with lower concurrency (%s) in case this is a memory issue" % maxConcurrency) + log.WARNING("dem2mesh failed, retrying with lower concurrency (%s) in case this is a memory issue" % maxConcurrency) else: raise e @@ -149,7 +149,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = # Since PoissonRecon has some kind of a race condition on ppc64el, and this helps... if platform.machine() == 'ppc64le': - log.ODM_WARNING("ppc64le platform detected, forcing single-threaded operation for PoissonRecon") + log.WARNING("ppc64le platform detected, forcing single-threaded operation for PoissonRecon") threads = 1 while True: @@ -174,7 +174,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = '{parallel} ' '--confidence'.format(**poissonReconArgs), env_vars={'OMP_NUM_THREADS': int(threads)}) except Exception as e: - log.ODM_WARNING(str(e)) + log.WARNING(str(e)) if os.path.isfile(outMeshDirty): break # Done! @@ -187,7 +187,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = if threads < 1: break else: - log.ODM_WARNING("PoissonRecon failed with %s threads, let's retry with %s..." % (threads * 2, threads)) + log.WARNING("PoissonRecon failed with %s threads, let's retry with %s..." 
% (threads * 2, threads)) # Cleanup and reduce vertex count if necessary diff --git a/opendm/multispectral.py b/opendm/multispectral.py index 97d3c92f..42542906 100644 --- a/opendm/multispectral.py +++ b/opendm/multispectral.py @@ -50,7 +50,7 @@ def dn_to_radiance(photo, image): if bit_depth_max: image /= bit_depth_max else: - log.ODM_WARNING("Cannot normalize DN for %s, bit depth is missing" % photo.filename) + log.WARNING("Cannot normalize DN for %s, bit depth is missing" % photo.filename) if V is not None: # vignette correction @@ -162,7 +162,7 @@ def compute_irradiance(photo, use_sun_sensor=True): horizontal_irradiance = direct_irradiance * np.sin(solar_elevation) + scattered_irradiance return horizontal_irradiance elif use_sun_sensor: - log.ODM_WARNING("No sun sensor values found for %s" % photo.filename) + log.WARNING("No sun sensor values found for %s" % photo.filename) return 1.0 @@ -193,7 +193,7 @@ def get_primary_band_name(multi_camera, user_band_name): band_name_fallback = multi_camera[0]['name'] - log.ODM_WARNING("Cannot find band name \"%s\", will use \"%s\" instead" % (user_band_name, band_name_fallback)) + log.WARNING("Cannot find band name \"%s\", will use \"%s\" instead" % (user_band_name, band_name_fallback)) return band_name_fallback @@ -248,7 +248,7 @@ def compute_band_maps(multi_camera, primary_band): return s2p, p2s except Exception as e: # Fallback on filename conventions - log.ODM_WARNING("%s, will use filenames instead" % str(e)) + log.WARNING("%s, will use filenames instead" % str(e)) filename_map = {} s2p = {} @@ -285,7 +285,7 @@ def compute_band_maps(multi_camera, primary_band): return s2p, p2s def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p, p2s, max_concurrency=1, max_samples=30): - log.ODM_INFO("Computing band alignment") + log.INFO("Computing band alignment") alignment_info = {} @@ -297,21 +297,21 @@ def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p def 
parallel_compute_homography(p): try: if len(matrices) >= max_samples: - # log.ODM_INFO("Got enough samples for %s (%s)" % (band['name'], max_samples)) + # log.INFO("Got enough samples for %s (%s)" % (band['name'], max_samples)) return # Find good matrix candidates for alignment primary_band_photo = s2p.get(p['filename']) if primary_band_photo is None: - log.ODM_WARNING("Cannot find primary band photo for %s" % p['filename']) + log.WARNING("Cannot find primary band photo for %s" % p['filename']) return warp_matrix, dimension, algo = compute_homography(os.path.join(images_path, p['filename']), os.path.join(images_path, primary_band_photo.filename)) if warp_matrix is not None: - log.ODM_INFO("%s --> %s good match" % (p['filename'], primary_band_photo.filename)) + log.INFO("%s --> %s good match" % (p['filename'], primary_band_photo.filename)) matrices.append({ 'warp_matrix': warp_matrix, @@ -320,9 +320,9 @@ def parallel_compute_homography(p): 'algo': algo }) else: - log.ODM_INFO("%s --> %s cannot be matched" % (p['filename'], primary_band_photo.filename)) + log.INFO("%s --> %s cannot be matched" % (p['filename'], primary_band_photo.filename)) except Exception as e: - log.ODM_WARNING("Failed to compute homography for %s: %s" % (p['filename'], str(e))) + log.WARNING("Failed to compute homography for %s: %s" % (p['filename'], str(e))) parallel_map(parallel_compute_homography, [{'filename': p.filename} for p in band['photos']], max_concurrency, single_thread_fallback=False) @@ -342,9 +342,9 @@ def parallel_compute_homography(p): if len(matrices) > 0: alignment_info[band['name']] = matrices[0] - log.ODM_INFO("%s band will be aligned using warp matrix %s (score: %s)" % (band['name'], matrices[0]['warp_matrix'], matrices[0]['score'])) + log.INFO("%s band will be aligned using warp matrix %s (score: %s)" % (band['name'], matrices[0]['warp_matrix'], matrices[0]['score'])) else: - log.ODM_WARNING("Cannot find alignment matrix for band %s, The band might end up misaligned!" 
% band['name']) + log.WARNING("Cannot find alignment matrix for band %s, The band might end up misaligned!" % band['name']) return alignment_info @@ -359,7 +359,7 @@ def compute_homography(image_filename, align_image_filename): max_dim = max(image_gray.shape) if max_dim <= 320: - log.ODM_WARNING("Small image for band alignment (%sx%s), this might be tough to compute." % (image_gray.shape[1], image_gray.shape[0])) + log.WARNING("Small image for band alignment (%sx%s), this might be tough to compute." % (image_gray.shape[1], image_gray.shape[0])) align_image = imread(align_image_filename, unchanged=True, anydepth=True) if align_image.shape[2] == 3: @@ -371,7 +371,7 @@ def compute_using(algorithm): try: h = algorithm(image_gray, align_image_gray) except Exception as e: - log.ODM_WARNING("Cannot compute homography: %s" % str(e)) + log.WARNING("Cannot compute homography: %s" % str(e)) return None, (None, None) if h is None: @@ -404,14 +404,14 @@ def compute_using(algorithm): if result[0] is None: algo = 'ecc' - log.ODM_INFO("Can't use features matching, will use ECC (this might take a bit)") + log.INFO("Can't use features matching, will use ECC (this might take a bit)") result = compute_using(find_ecc_homography) if result[0] is None: algo = None else: # ECC only for low resolution images algo = 'ecc' - log.ODM_INFO("Using ECC (this might take a bit)") + log.INFO("Using ECC (this might take a bit)") result = compute_using(find_ecc_homography) if result[0] is None: algo = None @@ -420,7 +420,7 @@ def compute_using(algorithm): return warp_matrix, dimension, algo except Exception as e: - log.ODM_WARNING("Compute homography: %s" % str(e)) + log.WARNING("Compute homography: %s" % str(e)) return None, (None, None), None def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, termination_eps=1e-8, start_eps=1e-4): @@ -444,7 +444,7 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, min_dim /= 2.0 pyramid_levels += 1 - 
log.ODM_INFO("Pyramid levels: %s" % pyramid_levels) + log.INFO("Pyramid levels: %s" % pyramid_levels) # Quick check on size if align_image_gray.shape[0] != image_gray.shape[0]: @@ -488,11 +488,11 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, number_of_iterations, eps) try: - log.ODM_INFO("Computing ECC pyramid level %s" % level) + log.INFO("Computing ECC pyramid level %s" % level) _, warp_matrix = cv2.findTransformECC(ig, aig, warp_matrix, cv2.MOTION_HOMOGRAPHY, criteria, inputMask=None, gaussFiltSize=9) except Exception as e: if level != pyramid_levels: - log.ODM_INFO("Could not compute ECC warp_matrix at pyramid level %s, resetting matrix" % level) + log.INFO("Could not compute ECC warp_matrix at pyramid level %s, resetting matrix" % level) warp_matrix = np.eye(3, 3, dtype=np.float32) else: raise e diff --git a/opendm/nvm.py b/opendm/nvm.py index d3c07d99..d93dfb4a 100644 --- a/opendm/nvm.py +++ b/opendm/nvm.py @@ -29,7 +29,7 @@ def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file): if new_filename is not None: entries.append("%s %s" % (os.path.join(dir_name, new_filename), " ".join(p))) else: - log.ODM_WARNING("Cannot find %s in image map for %s" % (file_name, dst_nvm_file)) + log.WARNING("Cannot find %s in image map for %s" % (file_name, dst_nvm_file)) if num_images != len(entries): raise Exception("Cannot write %s, not all band images have been matched" % dst_nvm_file) diff --git a/opendm/ogctiles.py b/opendm/ogctiles.py index 1f4b690e..775517c3 100644 --- a/opendm/ogctiles.py +++ b/opendm/ogctiles.py @@ -13,14 +13,14 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bounds_file=None, rerun=False): if not os.path.isfile(input_obj): - log.ODM_WARNING("No input OBJ file to process") + log.WARNING("No input OBJ file to process") return if rerun and io.dir_exists(output_path): - log.ODM_WARNING("Removing previous 3D tiles directory: %s" % output_path) + log.WARNING("Removing previous 3D 
tiles directory: %s" % output_path) shutil.rmtree(output_path) - log.ODM_INFO("Generating OGC 3D Tiles textured model") + log.INFO("Generating OGC 3D Tiles textured model") lat = lon = alt = 0 # Read reference_lla.json (if provided) @@ -32,7 +32,7 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou lon = reference_lla['longitude'] alt = reference_lla['altitude'] except Exception as e: - log.ODM_WARNING("Cannot read %s: %s" % (reference_lla, str(e))) + log.WARNING("Cannot read %s: %s" % (reference_lla, str(e))) # Read model bounds (if provided) divisions = 1 # default @@ -44,16 +44,16 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou if len(f) == 1: poly = shape(f[1]['geometry']) area = poly.area - log.ODM_INFO("Approximate area: %s m^2" % round(area, 2)) + log.INFO("Approximate area: %s m^2" % round(area, 2)) if area < DIV_THRESHOLD: divisions = 0 else: divisions = math.ceil(math.log((area / DIV_THRESHOLD), 4)) else: - log.ODM_WARNING("Invalid boundary file: %s" % model_bounds_file) + log.WARNING("Invalid boundary file: %s" % model_bounds_file) except Exception as e: - log.ODM_WARNING("Cannot read %s: %s" % (model_bounds_file, str(e))) + log.WARNING("Cannot read %s: %s" % (model_bounds_file, str(e))) try: kwargs = { @@ -67,18 +67,18 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou system.run('Obj2Tiles "{input}" "{output}" --divisions {divisions} --lat {lat} --lon {lon} --alt {alt} '.format(**kwargs)) except Exception as e: - log.ODM_WARNING("Cannot build 3D tiles textured model: %s" % str(e)) + log.WARNING("Cannot build 3D tiles textured model: %s" % str(e)) def build_pointcloud(input_pointcloud, output_path, max_concurrency, rerun=False): if not os.path.isfile(input_pointcloud): - log.ODM_WARNING("No input point cloud file to process") + log.WARNING("No input point cloud file to process") return if rerun and io.dir_exists(output_path): - log.ODM_WARNING("Removing 
previous 3D tiles directory: %s" % output_path) + log.WARNING("Removing previous 3D tiles directory: %s" % output_path) shutil.rmtree(output_path) - log.ODM_INFO("Generating OGC 3D Tiles point cloud") + log.INFO("Generating OGC 3D Tiles point cloud") try: if not os.path.isdir(output_path): @@ -99,7 +99,7 @@ def build_pointcloud(input_pointcloud, output_path, max_concurrency, rerun=False if os.path.isdir(d): shutil.rmtree(d) except Exception as e: - log.ODM_WARNING("Cannot build 3D tiles point cloud: %s" % str(e)) + log.WARNING("Cannot build 3D tiles point cloud: %s" % str(e)) def build_3dtiles(args, tree, reconstruction, rerun=False): @@ -125,11 +125,11 @@ def build_3dtiles(args, tree, reconstruction, rerun=False): build_textured_model(input_obj, model_output_path, reference_lla, model_bounds_file, rerun) else: - log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path) + log.WARNING("OGC 3D Tiles model %s already generated" % model_output_path) # Point cloud if not os.path.isdir(pointcloud_output_path) or rerun: build_pointcloud(tree.odm_georeferencing_model_laz, pointcloud_output_path, args.max_concurrency, rerun) else: - log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path) \ No newline at end of file + log.WARNING("OGC 3D Tiles point cloud %s already generated" % pointcloud_output_path) \ No newline at end of file diff --git a/opendm/opc.py b/opendm/opc.py index b5fdd19e..f335d79a 100644 --- a/opendm/opc.py +++ b/opendm/opc.py @@ -22,10 +22,10 @@ def classify(point_cloud, max_threads=8): os.remove(point_cloud) os.rename(tmp_output, point_cloud) else: - log.ODM_WARNING("Cannot classify using OpenPointClass (no output generated)") + log.WARNING("Cannot classify using OpenPointClass (no output generated)") else: - log.ODM_WARNING("Cannot download/access model from %s" % (model_url)) + log.WARNING("Cannot download/access model from %s" % (model_url)) except Exception as e: - log.ODM_WARNING("Cannot classify using OpenPointClass:
%s" % str(e)) + log.WARNING("Cannot classify using OpenPointClass: %s" % str(e)) diff --git a/opendm/orthophoto.py b/opendm/orthophoto.py index b78a4f5c..c19f0e0c 100644 --- a/opendm/orthophoto.py +++ b/opendm/orthophoto.py @@ -30,7 +30,7 @@ def get_orthophoto_vars(args): } def build_overviews(orthophoto_file): - log.ODM_INFO("Building Overviews") + log.INFO("Building Overviews") kwargs = {'orthophoto': orthophoto_file} # Run gdaladdo @@ -83,7 +83,7 @@ def generate_png(orthophoto_file, output_file=None, outsize=None): gtif = None except Exception as e: - log.ODM_WARNING("Cannot read orthophoto information for PNG generation: %s" % str(e)) + log.WARNING("Cannot read orthophoto information for PNG generation: %s" % str(e)) if outsize is not None: params.append("-outsize %s 0" % outsize) @@ -148,9 +148,9 @@ def generate_extent_polygon(orthophoto_file): feature = None ds = None gtif = None - log.ODM_INFO("Wrote %s" % output_file) + log.INFO("Wrote %s" % output_file) except Exception as e: - log.ODM_WARNING("Cannot create extent layer for %s: %s" % (orthophoto_file, str(e))) + log.WARNING("Cannot create extent layer for %s: %s" % (orthophoto_file, str(e))) def generate_tfw(orthophoto_file): @@ -164,9 +164,9 @@ def generate_tfw(orthophoto_file): # rasterio affine values taken by # https://mharty3.github.io/til/GIS/raster-affine-transforms/ f.write("\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n") - log.ODM_INFO("Wrote %s" % tfw_file) + log.INFO("Wrote %s" % tfw_file) except Exception as e: - log.ODM_WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e))) + log.WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e))) def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution, reconstruction, tree, embed_gcp_meta=False): @@ -195,14 +195,14 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_ti def compute_mask_raster(input_raster, vector_mask, output_raster, 
blend_distance=20, only_max_coords_feature=False): if not os.path.exists(input_raster): - log.ODM_WARNING("Cannot mask raster, %s does not exist" % input_raster) + log.WARNING("Cannot mask raster, %s does not exist" % input_raster) return if not os.path.exists(vector_mask): - log.ODM_WARNING("Cannot mask raster, %s does not exist" % vector_mask) + log.WARNING("Cannot mask raster, %s does not exist" % vector_mask) return - log.ODM_INFO("Computing mask raster: %s" % output_raster) + log.INFO("Computing mask raster: %s" % output_raster) with rasterio.open(input_raster, 'r') as rast: with fiona.open(vector_mask) as src: @@ -232,7 +232,7 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance dist_t[dist_t > blend_distance] = 1 np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe") else: - log.ODM_WARNING("%s does not have an alpha band, cannot blend cutline!" % input_raster) + log.WARNING("%s does not have an alpha band, cannot blend cutline!" % input_raster) with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst: dst.colorinterp = rast.colorinterp @@ -242,10 +242,10 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance def feather_raster(input_raster, output_raster, blend_distance=20): if not os.path.exists(input_raster): - log.ODM_WARNING("Cannot feather raster, %s does not exist" % input_raster) + log.WARNING("Cannot feather raster, %s does not exist" % input_raster) return - log.ODM_INFO("Computing feather raster: %s" % output_raster) + log.INFO("Computing feather raster: %s" % output_raster) with rasterio.open(input_raster, 'r') as rast: out_image = rast.read() @@ -257,7 +257,7 @@ def feather_raster(input_raster, output_raster, blend_distance=20): dist_t[dist_t > blend_distance] = 1 np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe") else: - log.ODM_WARNING("%s does not have an alpha band, cannot feather raster!" 
% input_raster) + log.WARNING("%s does not have an alpha band, cannot feather raster!" % input_raster) with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst: dst.colorinterp = rast.colorinterp @@ -276,15 +276,15 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}): for o, c in input_ortho_and_ortho_cuts: if not io.file_exists(o): - log.ODM_WARNING("%s does not exist. Will skip from merged orthophoto." % o) + log.WARNING("%s does not exist. Will skip from merged orthophoto." % o) continue if not io.file_exists(c): - log.ODM_WARNING("%s does not exist. Will skip from merged orthophoto." % c) + log.WARNING("%s does not exist. Will skip from merged orthophoto." % c) continue inputs.append((o, c)) if len(inputs) == 0: - log.ODM_WARNING("No input orthophotos, skipping merge.") + log.WARNING("No input orthophotos, skipping merge.") return with rasterio.open(inputs[0][0]) as first: @@ -294,7 +294,7 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}): num_bands = first.meta['count'] - 1 # minus alpha colorinterp = first.colorinterp - log.ODM_INFO("%s valid orthophoto rasters to merge" % len(inputs)) + log.INFO("%s valid orthophoto rasters to merge" % len(inputs)) sources = [(rasterio.open(o), rasterio.open(c)) for o,c in inputs] # scan input files. @@ -308,7 +308,7 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}): if src.profile["count"] < 2: raise ValueError("Inputs must be at least 2-band rasters") dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys) - log.ODM_INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) + log.INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) output_transform = Affine.translation(dst_w, dst_n) output_transform *= Affine.scale(res[0], -res[1]) @@ -320,8 +320,8 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}): # Adjust bounds to fit. 
dst_e, dst_s = output_transform * (output_width, output_height) - log.ODM_INFO("Output width: %d, height: %d" % (output_width, output_height)) - log.ODM_INFO("Adjusted bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) + log.INFO("Output width: %d, height: %d" % (output_width, output_height)) + log.INFO("Adjusted bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) profile["transform"] = output_transform profile["height"] = output_height diff --git a/opendm/osfm.py b/opendm/osfm.py index c44b4564..6e6dea07 100644 --- a/opendm/osfm.py +++ b/opendm/osfm.py @@ -47,7 +47,7 @@ def create_tracks(self, rerun=False): if not io.file_exists(tracks_file) or rerun: self.run('create_tracks') else: - log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' % tracks_file) + log.WARNING('Found a valid OpenSfM tracks file in: %s' % tracks_file) def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun=False): reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json') @@ -56,7 +56,7 @@ def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun= if merge_partial: self.check_merge_partial_reconstructions() else: - log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % reconstruction_file) + log.WARNING('Found a valid OpenSfM reconstruction file in: %s' % reconstruction_file) # Check that a reconstruction file has been created if not self.reconstructed(): @@ -72,7 +72,7 @@ def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun= if not io.file_exists(rs_file) or rerun: self.run('rs_correct') - log.ODM_INFO("Re-running the reconstruction pipeline") + log.INFO("Re-running the reconstruction pipeline") self.match_features(True) self.create_tracks(True) @@ -80,7 +80,7 @@ def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun= self.touch(rs_file) else: - log.ODM_WARNING("Rolling shutter correction already applied") + log.WARNING("Rolling shutter correction 
already applied") def check_merge_partial_reconstructions(self): if self.reconstructed(): @@ -89,8 +89,8 @@ def check_merge_partial_reconstructions(self): tracks_manager = data.load_tracks_manager() if len(reconstructions) > 1: - log.ODM_WARNING("Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap" % len(reconstructions)) - log.ODM_INFO("Attempting merge") + log.WARNING("Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap" % len(reconstructions)) + log.INFO("Attempting merge") merged = Reconstruction() merged.set_reference(reconstructions[0].reference) @@ -100,7 +100,7 @@ def check_merge_partial_reconstructions(self): # Should never happen continue - log.ODM_INFO("Merging reconstruction %s" % ix_r) + log.INFO("Merging reconstruction %s" % ix_r) for camera in rec.cameras.values(): merged.add_camera(camera) @@ -110,7 +110,7 @@ def check_merge_partial_reconstructions(self): new_point = merged.create_point(point.id, point.coordinates) new_point.color = point.color except RuntimeError as e: - log.ODM_WARNING("Cannot merge shot id %s (%s)" % (shot.id, str(e))) + log.WARNING("Cannot merge shot id %s (%s)" % (shot.id, str(e))) continue for shot in rec.shots.values(): @@ -118,7 +118,7 @@ def check_merge_partial_reconstructions(self): try: obsdict = tracks_manager.get_shot_observations(shot.id) except RuntimeError: - log.ODM_WARNING("Shot id %s missing from tracks_manager!" % shot.id) + log.WARNING("Shot id %s missing from tracks_manager!" 
% shot.id) continue for track_id, obs in obsdict.items(): if track_id in merged.points: @@ -143,7 +143,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal photos = get_photos_by_band(reconstruction.multi_camera, args.primary_band) if len(photos) < 1: raise Exception("Not enough images in selected band %s" % args.primary_band.lower()) - log.ODM_INFO("Reconstruction will use %s images from %s band" % (len(photos), args.primary_band.lower())) + log.INFO("Reconstruction will use %s images from %s band" % (len(photos), args.primary_band.lower())) else: photos = reconstruction.photos @@ -164,7 +164,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal # check 0 altitude images percentage when has_alt is True if has_alt and num_zero_alt / len(photos) > 0.05: - log.ODM_WARNING("More than 5% of images have zero altitude, this might be an indicator that the images have no altitude information") + log.WARNING("More than 5% of images have zero altitude, this might be an indicator that the images have no altitude information") has_alt = False # check for image_groups.txt (split-merge) @@ -175,7 +175,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal if io.file_exists(image_groups_file): dst_groups_file = os.path.join(self.opensfm_project_path, "image_groups.txt") io.copy(image_groups_file, dst_groups_file) - log.ODM_INFO("Copied %s to %s" % (image_groups_file, dst_groups_file)) + log.INFO("Copied %s to %s" % (image_groups_file, dst_groups_file)) # check for cameras if args.cameras: @@ -183,9 +183,9 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal camera_overrides = camera.get_opensfm_camera_models(args.cameras) with open(os.path.join(self.opensfm_project_path, "camera_models_overrides.json"), 'w') as f: f.write(json.dumps(camera_overrides)) - log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory") + log.INFO("Wrote 
camera_models_overrides.json to OpenSfM directory") except Exception as e: - log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e)) + log.WARNING("Cannot set camera_models_overrides.json: %s" % str(e)) # Check image masks masks = [] @@ -194,7 +194,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal masks.append((p.filename, os.path.join(images_path, p.mask))) if masks: - log.ODM_INFO("Found %s image masks" % len(masks)) + log.INFO("Found %s image masks" % len(masks)) with open(os.path.join(self.opensfm_project_path, "mask_list.txt"), 'w') as f: for fname, mask in masks: f.write("{} {}\n".format(fname, mask)) @@ -215,7 +215,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal if max_dims is not None: w, h = max_dims max_dim = max(w, h) - log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim)) + log.INFO("Maximum photo dimensions: %spx" % str(max_dim)) lower_limit = 320 upper_limit = 4480 @@ -229,9 +229,9 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal factor = min(1, feature_quality_scale[args.feature_quality] * multiplier) feature_process_size = min(upper_limit, max(lower_limit, int(max_dim * factor))) - log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size) + log.INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size) else: - log.ODM_WARNING("Cannot compute max image dimensions, going with defaults") + log.WARNING("Cannot compute max image dimensions, going with defaults") # create config file for OpenSfM if args.matcher_neighbors > 0: @@ -248,7 +248,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal config = [ "report_name: ODX", - "report_version: %s" % log.odm_version(), + "report_version: %s" % log.get_version(), "use_exif_size: no", "flann_algorithm: KDTREE", # more stable, faster than KMEANS "feature_process_size: %s" % feature_process_size, @@ 
-271,7 +271,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal if not reconstruction.is_georeferenced(): config.append("matching_order_neighbors: %s" % args.matcher_order) else: - log.ODM_WARNING("Georeferenced reconstruction, ignoring --matcher-order") + log.WARNING("Georeferenced reconstruction, ignoring --matcher-order") if args.camera_lens != 'auto': config.append("camera_projection_type: %s" % args.camera_lens.upper()) @@ -286,20 +286,20 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal } if not has_gps and not 'matcher_type_is_set' in args: - log.ODM_INFO("No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)") + log.INFO("No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)") matcher_type = "bow" if matcher_type == "bow": # Cannot use anything other than HAHOG with BOW if feature_type != "HAHOG": - log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT") + log.WARNING("Using BOW matching, will use HAHOG feature type, not SIFT") feature_type = "HAHOG" config.append("matcher_type: %s" % osfm_matchers[matcher_type]) # GPU acceleration? 
if feature_type == "SIFT": - log.ODM_INFO("Checking for GPU as using SIFT for extracting features") + log.INFO("Checking for GPU as using SIFT for extracting features") if has_gpu(args) and max_dims is not None: w, h = max_dims if w > h: @@ -310,16 +310,16 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal h = int(feature_process_size) if has_popsift_and_can_handle_texsize(w, h): - log.ODM_INFO("Using GPU for extracting SIFT features") + log.INFO("Using GPU for extracting SIFT features") feature_type = "SIFT_GPU" self.gpu_sift_feature_extraction = True else: - log.ODM_INFO("Using CPU for extracting SIFT features as texture size is too large or GPU SIFT is not available") + log.INFO("Using CPU for extracting SIFT features as texture size is too large or GPU SIFT is not available") config.append("feature_type: %s" % feature_type) if has_alt: - log.ODM_INFO("Altitude data detected, enabling it for GPS alignment") + log.INFO("Altitude data detected, enabling it for GPS alignment") config.append("use_altitude_tag: yes") gcp_path = reconstruction.gcp.gcp_path @@ -329,7 +329,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal config.append("align_method: orientation_prior") if args.use_hybrid_bundle_adjustment: - log.ODM_INFO("Enabling hybrid bundle adjustment") + log.INFO("Enabling hybrid bundle adjustment") config.append("bundle_interval: 100") # Bundle after adding 'bundle_interval' cameras config.append("bundle_new_points_ratio: 1.2") # Bundle when (new points) / (bundled points) > bundle_new_points_ratio config.append("local_bundle_radius: 1") # Max image graph distance for images to be included in local bundle adjustment @@ -348,7 +348,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal config = config + append_config # write config file - log.ODM_INFO(config) + log.INFO(config) config_filename = self.get_config_file_path() with open(config_filename, 'w') as fout: 
fout.write("\n".join(config)) @@ -357,7 +357,7 @@ def setup(self, args, images_path, reconstruction, append_config = [], rerun=Fal if reconstruction.is_georeferenced(): self.write_reference_lla(reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset, reconstruction.georef.proj4()) else: - log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path) + log.WARNING("%s already exists, not rerunning OpenSfM setup" % list_path) def get_config_file_path(self): return os.path.join(self.opensfm_project_path, 'config.yaml') @@ -378,7 +378,7 @@ def photos_to_metadata(self, photos, rolling_shutter, rolling_shutter_readout, r metadata_dir = self.path("exif") if io.dir_exists(metadata_dir) and not rerun: - log.ODM_WARNING("%s already exists, not rerunning photo to metadata" % metadata_dir) + log.WARNING("%s already exists, not rerunning photo to metadata" % metadata_dir) return if io.dir_exists(metadata_dir): @@ -428,7 +428,7 @@ def feature_matching(self, rerun=False): # for various reasons, so before giving up # we try to fallback to CPU if hasattr(self, 'gpu_sift_feature_extraction'): - log.ODM_WARNING("GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU") + log.WARNING("GPU SIFT extraction failed, maybe the graphics card is not supported? 
Attempting fallback to CPU") self.update_config({'feature_type': "SIFT"}) if os.path.exists(features_dir): shutil.rmtree(features_dir) @@ -436,7 +436,7 @@ def feature_matching(self, rerun=False): else: raise e else: - log.ODM_WARNING('Detect features already done: %s exists' % features_dir) + log.WARNING('Detect features already done: %s exists' % features_dir) self.match_features(rerun) @@ -445,12 +445,12 @@ def match_features(self, rerun=False): if not io.dir_exists(matches_dir) or rerun: self.run('match_features') else: - log.ODM_WARNING('Match features already done: %s exists' % matches_dir) + log.WARNING('Match features already done: %s exists' % matches_dir) def align_reconstructions(self, rerun): alignment_file = self.path('alignment_done.txt') if not io.file_exists(alignment_file) or rerun: - log.ODM_INFO("Aligning submodels...") + log.INFO("Aligning submodels...") meta_data = metadataset.MetaDataSet(self.opensfm_project_path) reconstruction_shots = tools.load_reconstruction_shots(meta_data) transformations = tools.align_reconstructions(reconstruction_shots, @@ -460,7 +460,7 @@ def align_reconstructions(self, rerun): self.touch(alignment_file) else: - log.ODM_WARNING('Found a alignment done progress file in: %s' % alignment_file) + log.WARNING('Found a alignment done progress file in: %s' % alignment_file) def touch(self, file): with open(file, 'w') as fout: @@ -476,12 +476,12 @@ def extract_cameras(self, output, rerun=False): with open(output, 'w') as fout: fout.write(json.dumps(camera.get_cameras_from_opensfm(reconstruction_file), indent=4)) except Exception as e: - log.ODM_WARNING("Cannot export cameras to %s. %s." % (output, str(e))) + log.WARNING("Cannot export cameras to %s. %s." % (output, str(e))) else: - log.ODM_INFO("Already extracted cameras") + log.INFO("Already extracted cameras") def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, runId="nominal"): - log.ODM_INFO("Undistorting %s ..." 
% self.opensfm_project_path) + log.INFO("Undistorting %s ..." % self.opensfm_project_path) done_flag_file = self.path("undistorted", "%s_done.txt" % runId) if not io.file_exists(done_flag_file) or rerun: @@ -495,7 +495,7 @@ def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, self.touch(done_flag_file) else: - log.ODM_WARNING("Already undistorted (%s)" % runId) + log.WARNING("Already undistorted (%s)" % runId) def restore_reconstruction_backup(self): if os.path.exists(self.recon_backup_file()): @@ -504,13 +504,13 @@ def restore_reconstruction_backup(self): if os.path.exists(self.recon_file()): os.remove(self.recon_file()) os.replace(self.recon_backup_file(), self.recon_file()) - log.ODM_INFO("Restored reconstruction.json") + log.INFO("Restored reconstruction.json") def backup_reconstruction(self): if os.path.exists(self.recon_backup_file()): os.remove(self.recon_backup_file()) - log.ODM_INFO("Backing up reconstruction") + log.INFO("Backing up reconstruction") shutil.copyfile(self.recon_file(), self.recon_backup_file()) def recon_backup_file(self): @@ -531,7 +531,7 @@ def add_shots_to_reconstruction(self, p2s): for shot_id in sids: secondary_photos = p2s.get(shot_id) if secondary_photos is None: - log.ODM_WARNING("Cannot find secondary photos for %s" % shot_id) + log.WARNING("Cannot find secondary photos for %s" % shot_id) continue for p in secondary_photos: @@ -543,31 +543,31 @@ def add_shots_to_reconstruction(self, p2s): def update_config(self, cfg_dict): cfg_file = self.get_config_file_path() - log.ODM_INFO("Updating %s" % cfg_file) + log.INFO("Updating %s" % cfg_file) if os.path.exists(cfg_file): try: with open(cfg_file) as fin: cfg = yaml.safe_load(fin) for k, v in cfg_dict.items(): cfg[k] = v - log.ODM_INFO("%s: %s" % (k, v)) + log.INFO("%s: %s" % (k, v)) with open(cfg_file, 'w') as fout: fout.write(yaml.dump(cfg, default_flow_style=False)) except Exception as e: - log.ODM_WARNING("Cannot update configuration file %s: %s" % 
(cfg_file, str(e))) + log.WARNING("Cannot update configuration file %s: %s" % (cfg_file, str(e))) else: - log.ODM_WARNING("Tried to update configuration, but %s does not exist." % cfg_file) + log.WARNING("Tried to update configuration, but %s does not exist." % cfg_file) def export_stats(self, rerun=False): - log.ODM_INFO("Export reconstruction stats") + log.INFO("Export reconstruction stats") stats_path = self.path("stats", "stats.json") if not os.path.exists(stats_path) or rerun: self.run("compute_statistics --diagram_max_points 100000") else: - log.ODM_WARNING("Found existing reconstruction stats %s" % stats_path) + log.WARNING("Found existing reconstruction stats %s" % stats_path) def export_report(self, report_path, odm_stats, rerun=False): - log.ODM_INFO("Exporting report to %s" % report_path) + log.INFO("Exporting report to %s" % report_path) osfm_report_path = self.path("stats", "report.pdf") if not os.path.exists(report_path) or rerun: @@ -581,9 +581,9 @@ def export_report(self, report_path, odm_stats, rerun=False): os.unlink(report_path) shutil.move(osfm_report_path, report_path) else: - log.ODM_WARNING("Report could not be generated") + log.WARNING("Report could not be generated") else: - log.ODM_WARNING("Report %s already exported" % report_path) + log.WARNING("Report %s already exported" % report_path) def write_reference_lla(self, offset_x, offset_y, proj4): reference_lla = self.path("reference_lla.json") @@ -598,7 +598,7 @@ def write_reference_lla(self, offset_x, offset_y, proj4): 'altitude': 0.0 }, indent=4)) - log.ODM_INFO("Wrote reference_lla.json") + log.INFO("Wrote reference_lla.json") def ground_control_points(self, proj4): """ @@ -614,7 +614,7 @@ def ground_control_points(self, proj4): with open(gcp_stats_file) as f: gcps_stats = json.loads(f.read()) except: - log.ODM_INFO("Cannot parse %s" % gcp_stats_file) + log.INFO("Cannot parse %s" % gcp_stats_file) if not gcps_stats: return [] @@ -704,7 +704,7 @@ def get_submodel_argv(args, 
submodels_path = None, submodel_name = None): if isinstance(args_dict[k], dict): args_dict[k] = json.dumps(args_dict[k]) except ValueError as e: - log.ODM_WARNING("Cannot parse/read JSON: {}".format(str(e))) + log.WARNING("Cannot parse/read JSON: {}".format(str(e))) # Handle crop (cannot be zero for split/merge) if "crop" in set_keys: @@ -768,7 +768,7 @@ def get_submodel_paths(submodels_path, *paths): if os.path.exists(p): result.append(p) else: - log.ODM_WARNING("Missing %s from submodel %s" % (p, f)) + log.WARNING("Missing %s from submodel %s" % (p, f)) return result @@ -792,7 +792,7 @@ def get_all_submodel_paths(submodels_path, *all_paths): for ap in all_paths: p = os.path.join(submodels_path, f, ap) if not os.path.exists(p): - log.ODM_WARNING("Missing %s from submodel %s" % (p, f)) + log.WARNING("Missing %s from submodel %s" % (p, f)) all_found = False if all_found: diff --git a/opendm/photo.py b/opendm/photo.py index eaa3433c..f305e115 100644 --- a/opendm/photo.py +++ b/opendm/photo.py @@ -84,7 +84,7 @@ def get_mm_per_unit(resolution_unit): elif resolution_unit == 5: # um return 0.001 else: - log.ODM_WARNING("Unknown EXIF resolution unit value: {}".format(resolution_unit)) + log.WARNING("Unknown EXIF resolution unit value: {}".format(resolution_unit)) return None class PhotoCorruptedException(Exception): @@ -216,14 +216,14 @@ def parse_exif_values(self, _path_file): self.camera_make = tags['Image Make'].values self.camera_make = self.camera_make.strip() except UnicodeDecodeError: - log.ODM_WARNING("EXIF Image Make might be corrupted") + log.WARNING("EXIF Image Make might be corrupted") self.camera_make = "unknown" if 'Image Model' in tags: try: self.camera_model = tags['Image Model'].values self.camera_model = self.camera_model.strip() except UnicodeDecodeError: - log.ODM_WARNING("EXIF Image Model might be corrupted") + log.WARNING("EXIF Image Model might be corrupted") self.camera_model = "unknown" if 'GPS GPSAltitude' in tags: self.altitude = 
self.float_value(tags['GPS GPSAltitude']) @@ -232,17 +232,17 @@ def parse_exif_values(self, _path_file): if 'GPS GPSLatitude' in tags and 'GPS GPSLatitudeRef' in tags: self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef']) elif 'GPS GPSLatitude' in tags: - log.ODM_WARNING("GPS position for %s might be incorrect, GPSLatitudeRef tag is missing (assuming N)" % self.filename) + log.WARNING("GPS position for %s might be incorrect, GPSLatitudeRef tag is missing (assuming N)" % self.filename) self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], GPSRefMock('N')) if 'GPS GPSLongitude' in tags and 'GPS GPSLongitudeRef' in tags: self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef']) elif 'GPS GPSLongitude' in tags: - log.ODM_WARNING("GPS position for %s might be incorrect, GPSLongitudeRef tag is missing (assuming E)" % self.filename) + log.WARNING("GPS position for %s might be incorrect, GPSLongitudeRef tag is missing (assuming E)" % self.filename) self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], GPSRefMock('E')) if 'Image Orientation' in tags: self.orientation = self.int_value(tags['Image Orientation']) except (IndexError, ValueError) as e: - log.ODM_WARNING("Cannot read basic EXIF tags for %s: %s" % (self.filename, str(e))) + log.WARNING("Cannot read basic EXIF tags for %s: %s" % (self.filename, str(e))) try: if 'Image Tag 0xC61A' in tags: @@ -300,12 +300,12 @@ def parse_exif_values(self, _path_file): self.exif_height = self.int_value(tags['EXIF ExifImageLength']) except Exception as e: - log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (self.filename, str(e))) + log.WARNING("Cannot read extended EXIF tags for %s: %s" % (self.filename, str(e))) # Warn if GPS coordinates are suspiciously wrong if self.latitude is not None and self.latitude == 0 and \ self.longitude is not None and self.longitude == 0: - log.ODM_WARNING("%s has GPS position (0,0), possibly corrupted" % 
self.filename) + log.WARNING("%s has GPS position (0,0), possibly corrupted" % self.filename) # Extract XMP tags @@ -477,7 +477,7 @@ def parse_exif_values(self, _path_file): self.roll *= -1 except Exception as e: - log.ODM_WARNING("Cannot read XMP tags for %s: %s" % (self.filename, str(e))) + log.WARNING("Cannot read XMP tags for %s: %s" % (self.filename, str(e))) # self.set_attr_from_xmp_tag('center_wavelength', xtags, [ # 'Camera:CentralWavelength' @@ -512,7 +512,7 @@ def compute_focal(self, tags, xtags): try: self.focal_ratio = self.extract_focal(self.camera_make, self.camera_model, tags, xtags) except (IndexError, ValueError) as e: - log.ODM_WARNING("Cannot extract focal ratio for %s: %s" % (self.filename, str(e))) + log.WARNING("Cannot extract focal ratio for %s: %s" % (self.filename, str(e))) def extract_focal(self, make, model, tags, xtags): if make != "unknown": @@ -605,7 +605,7 @@ def get_xmp(self, file): from bs4 import BeautifulSoup xmp_str = str(BeautifulSoup(xmp_str, 'xml')) xdict = x2d.parse(xmp_str) - log.ODM_WARNING("%s has malformed XMP XML (but we fixed it)" % self.filename) + log.WARNING("%s has malformed XMP XML (but we fixed it)" % self.filename) xdict = xdict.get('x:xmpmeta', {}) xdict = xdict.get('rdf:RDF', {}) xdict = xdict.get('rdf:Description', {}) @@ -925,7 +925,7 @@ def compute_opk(self): m = np.linalg.norm(xnp) if m == 0: - log.ODM_WARNING("Cannot compute OPK angles, divider = 0") + log.WARNING("Cannot compute OPK angles, divider = 0") return # Unit vector pointing north diff --git a/opendm/point_cloud.py b/opendm/point_cloud.py index b51f65f2..6c3f9a74 100644 --- a/opendm/point_cloud.py +++ b/opendm/point_cloud.py @@ -48,17 +48,17 @@ def ply_info(input_ply): def split(input_point_cloud, outdir, filename_template, capacity, dims=None): - log.ODM_INFO("Splitting point cloud filtering in chunks of {} vertices".format(capacity)) + log.INFO("Splitting point cloud filtering in chunks of {} vertices".format(capacity)) if not 
os.path.exists(input_point_cloud): - log.ODM_ERROR("{} does not exist, cannot split point cloud. The program will now exit.".format(input_point_cloud)) + log.ERROR("{} does not exist, cannot split point cloud. The program will now exit.".format(input_point_cloud)) sys.exit(1) if not os.path.exists(outdir): system.mkdir_p(outdir) if len(os.listdir(outdir)) != 0: - log.ODM_ERROR("%s already contains some files. The program will now exit.".format(outdir)) + log.ERROR("%s already contains some files. The program will now exit.".format(outdir)) sys.exit(1) cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (input_point_cloud, os.path.join(outdir, filename_template), capacity) @@ -78,7 +78,7 @@ def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviati Filters a point cloud """ if not os.path.exists(input_point_cloud): - log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud)) + log.ERROR("{} does not exist. The program will now exit.".format(input_point_cloud)) sys.exit(1) args = [ @@ -88,17 +88,17 @@ def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviati ] if sample_radius > 0: - log.ODM_INFO("Sampling points around a %sm radius" % sample_radius) + log.INFO("Sampling points around a %sm radius" % sample_radius) args.append('--radius %s' % sample_radius) meank = 16 - log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation)) + log.INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation)) args.append('--meank %s' % meank) args.append('--std %s' % standard_deviation) args.append('--stats "%s"' % output_stats) if boundary is not None: - log.ODM_INFO("Boundary {}".format(boundary)) + log.INFO("Boundary {}".format(boundary)) fd, boundary_json_file = tempfile.mkstemp(suffix='.boundary.json') os.close(fd) with open(boundary_json_file, 'w') as f: @@ -108,12 +108,12 @@ 
def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviati system.run('"%s" %s' % (context.fpcfilter_path, " ".join(args))) if not os.path.exists(output_point_cloud): - log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud)) + log.WARNING("{} not found, filtering has failed.".format(output_point_cloud)) def get_spacing(stats_file, resolution_fallback=5.0): def fallback(): - log.ODM_WARNING("Cannot read %s, falling back to resolution estimate" % stats_file) + log.WARNING("Cannot read %s, falling back to resolution estimate" % stats_file) return (resolution_fallback / 100.0) / 2.0 if not os.path.isfile(stats_file): @@ -190,11 +190,11 @@ def get_extent(input_point_cloud): def merge(input_point_cloud_files, output_file, rerun=False): num_files = len(input_point_cloud_files) if num_files == 0: - log.ODM_WARNING("No input point cloud files to process") + log.WARNING("No input point cloud files to process") return if io.file_exists(output_file): - log.ODM_WARNING("Removing previous point cloud: %s" % output_file) + log.WARNING("Removing previous point cloud: %s" % output_file) os.remove(output_file) kwargs = { @@ -211,11 +211,11 @@ def fast_merge_ply(input_point_cloud_files, output_file): num_files = len(input_point_cloud_files) if num_files == 0: - log.ODM_WARNING("No input point cloud files to process") + log.WARNING("No input point cloud files to process") return if io.file_exists(output_file): - log.ODM_WARNING("Removing previous point cloud: %s" % output_file) + log.WARNING("Removing previous point cloud: %s" % output_file) os.remove(output_file) vertex_count = sum([ply_info(pcf)['vertex_count'] for pcf in input_point_cloud_files]) @@ -261,7 +261,7 @@ def fast_merge_ply(input_point_cloud_files, output_file): def merge_ply(input_point_cloud_files, output_file, dims=None): num_files = len(input_point_cloud_files) if num_files == 0: - log.ODM_WARNING("No input point cloud files to process") + log.WARNING("No input point 
cloud files to process") return cmd = [ @@ -281,7 +281,7 @@ def post_point_cloud_steps(args, tree, rerun=False): pc_classify_marker = os.path.join(tree.odm_georeferencing, 'pc_classify_done.txt') if not io.file_exists(pc_classify_marker) or rerun: - log.ODM_INFO("Classifying {} using Simple Morphological Filter (1/2)".format(tree.odm_georeferencing_model_laz)) + log.INFO("Classifying {} using Simple Morphological Filter (1/2)".format(tree.odm_georeferencing_model_laz)) commands.classify(tree.odm_georeferencing_model_laz, args.smrf_scalar, args.smrf_slope, @@ -289,7 +289,7 @@ def post_point_cloud_steps(args, tree, rerun=False): args.smrf_window ) - log.ODM_INFO("Classifying {} using OpenPointClass (2/2)".format(tree.odm_georeferencing_model_laz)) + log.INFO("Classifying {} using OpenPointClass (2/2)".format(tree.odm_georeferencing_model_laz)) classify(tree.odm_georeferencing_model_laz, args.max_concurrency) with open(pc_classify_marker, 'w') as f: @@ -301,7 +301,7 @@ def post_point_cloud_steps(args, tree, rerun=False): # XYZ point cloud output if args.pc_csv: - log.ODM_INFO("Creating CSV file (XYZ format)") + log.INFO("Creating CSV file (XYZ format)") if not io.file_exists(tree.odm_georeferencing_xyz_file) or rerun: system.run("pdal translate -i \"{}\" " @@ -312,11 +312,11 @@ def post_point_cloud_steps(args, tree, rerun=False): tree.odm_georeferencing_model_laz, tree.odm_georeferencing_xyz_file)) else: - log.ODM_WARNING("Found existing CSV file %s" % tree.odm_georeferencing_xyz_file) + log.WARNING("Found existing CSV file %s" % tree.odm_georeferencing_xyz_file) # LAS point cloud output if args.pc_las: - log.ODM_INFO("Creating LAS file") + log.INFO("Creating LAS file") if not io.file_exists(tree.odm_georeferencing_model_las) or rerun: system.run("pdal translate -i \"{}\" " @@ -324,16 +324,16 @@ def post_point_cloud_steps(args, tree, rerun=False): tree.odm_georeferencing_model_laz, tree.odm_georeferencing_model_las)) else: - log.ODM_WARNING("Found existing LAS file 
%s" % tree.odm_georeferencing_model_las) + log.WARNING("Found existing LAS file %s" % tree.odm_georeferencing_model_las) # EPT point cloud output if args.pc_ept: - log.ODM_INFO("Creating Entwine Point Tile output") + log.INFO("Creating Entwine Point Tile output") entwine.build([tree.odm_georeferencing_model_laz], tree.entwine_pointcloud, max_concurrency=args.max_concurrency, rerun=rerun) # COPC point clouds if args.pc_copc: - log.ODM_INFO("Creating Cloud Optimized Point Cloud (COPC)") + log.INFO("Creating Cloud Optimized Point Cloud (COPC)") copc_output = io.related_file_path(tree.odm_georeferencing_model_laz, postfix=".copc") entwine.build_copc([tree.odm_georeferencing_model_laz], copc_output, convert_rgb_8_to_16=True) \ No newline at end of file diff --git a/opendm/progress.py b/opendm/progress.py index 264db82d..0bc700ee 100644 --- a/opendm/progress.py +++ b/opendm/progress.py @@ -6,7 +6,7 @@ try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) except: - log.ODM_WARNING("Cannot create UDP socket, progress reporting will be disabled.") + log.WARNING("Cannot create UDP socket, progress reporting will be disabled.") sock = None class Broadcaster: @@ -28,13 +28,13 @@ def send_update(self, global_progress): UDP_IP = "127.0.0.1" if global_progress > 100: - log.ODM_WARNING("Global progress is > 100 (%s), please contact the developers." % global_progress) + log.WARNING("Global progress is > 100 (%s), please contact the developers." 
% global_progress) global_progress = 100 try: sock.sendto("PGUP/{}/{}/{}".format(self.pid, self.project_name, float(global_progress)).encode('utf8'), (UDP_IP, self.port)) except Exception as e: - log.ODM_WARNING("Failed to broadcast progress update on UDP port %s (%s)" % (str(self.port), str(e))) + log.WARNING("Failed to broadcast progress update on UDP port %s (%s)" % (str(self.port), str(e))) progressbc = Broadcaster(PROGRESS_BROADCAST_PORT) \ No newline at end of file diff --git a/opendm/pseudogeo.py b/opendm/pseudogeo.py index 5d706fef..1e666827 100644 --- a/opendm/pseudogeo.py +++ b/opendm/pseudogeo.py @@ -12,11 +12,11 @@ def get_pseudogeo_scale(): def add_pseudo_georeferencing(geotiff): if not io.file_exists(geotiff): - log.ODM_WARNING("Cannot add pseudo georeferencing, %s does not exist" % geotiff) + log.WARNING("Cannot add pseudo georeferencing, %s does not exist" % geotiff) return try: - log.ODM_INFO("Adding pseudo georeferencing (raster should show up at the equator) to %s" % geotiff) + log.INFO("Adding pseudo georeferencing (raster should show up at the equator) to %s" % geotiff) dst_ds = gdal.Open(geotiff, GA_Update) srs = osr.SpatialReference() @@ -27,4 +27,4 @@ def add_pseudo_georeferencing(geotiff): dst_ds = None except Exception as e: - log.ODM_WARNING("Cannot add pseudo georeferencing to %s (%s), skipping..." % (geotiff, str(e))) \ No newline at end of file + log.WARNING("Cannot add pseudo georeferencing to %s (%s), skipping..." 
% (geotiff, str(e))) \ No newline at end of file diff --git a/opendm/remote.py b/opendm/remote.py index d5bd7a68..dcf36836 100644 --- a/opendm/remote.py +++ b/opendm/remote.py @@ -36,12 +36,12 @@ def __init__(self, nodeUrl, rolling_shutter = False, rerun = False): } self.node_online = True - log.ODM_INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port)) + log.INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port)) try: info = self.node.info() - log.ODM_INFO("LRE: Node is online and running %s version %s" % (info.engine, info.engine_version)) + log.INFO("LRE: Node is online and running %s version %s" % (info.engine, info.engine_version)) except exceptions.NodeConnectionError: - log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.") + log.WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.") self.node_online = False except Exception as e: raise system.ExitException("LRE: An unexpected problem happened while opening the node connection: %s" % str(e)) @@ -72,7 +72,7 @@ class nonloc: # Create queue q = queue.Queue() for pp in self.project_paths: - log.ODM_INFO("LRE: Adding to queue %s" % pp) + log.INFO("LRE: Adding to queue %s" % pp) q.put(taskClass(pp, self.node, self.params)) def remove_task_safe(task): @@ -84,22 +84,22 @@ def remove_task_safe(task): def cleanup_remote_tasks(): if self.params['tasks']: - log.ODM_WARNING("LRE: Attempting to cleanup remote tasks") + log.WARNING("LRE: Attempting to cleanup remote tasks") else: - log.ODM_INFO("LRE: No remote tasks left to cleanup") + log.INFO("LRE: No remote tasks left to cleanup") for task in self.params['tasks']: - log.ODM_INFO("LRE: Removing remote task %s... %s" % (task.uuid, 'OK' if remove_task_safe(task) else 'NO')) + log.INFO("LRE: Removing remote task %s... 
%s" % (task.uuid, 'OK' if remove_task_safe(task) else 'NO')) def handle_result(task, local, error = None, partial=False): def cleanup_remote(): if not partial and task.remote_task: - log.ODM_INFO("LRE: Cleaning up remote task (%s)... %s" % (task.remote_task.uuid, 'OK' if remove_task_safe(task.remote_task) else 'NO')) + log.INFO("LRE: Cleaning up remote task (%s)... %s" % (task.remote_task.uuid, 'OK' if remove_task_safe(task.remote_task) else 'NO')) self.params['tasks'].remove(task.remote_task) task.remote_task = None if error: - log.ODM_WARNING("LRE: %s failed with: %s" % (task, str(error))) + log.WARNING("LRE: %s failed with: %s" % (task, str(error))) # Special case in which the error is caused by a SIGTERM signal # this means a local processing was terminated either by CTRL+C or @@ -123,7 +123,7 @@ def cleanup_remote(): pass nonloc.max_remote_tasks = max(1, node_task_limit) - log.ODM_INFO("LRE: Node task limit reached. Setting max remote tasks to %s" % node_task_limit) + log.INFO("LRE: Node task limit reached. 
Setting max remote tasks to %s" % node_task_limit) # Retry, but only if the error is not related to a task failure @@ -137,7 +137,7 @@ def cleanup_remote(): cleanup_remote() q.task_done() - log.ODM_INFO("LRE: Re-queueing %s (retries: %s)" % (task, task.retries)) + log.INFO("LRE: Re-queueing %s (retries: %s)" % (task, task.retries)) q.put(task) if not local: remote_running_tasks.increment(-1) return @@ -147,7 +147,7 @@ def cleanup_remote(): if not local: remote_running_tasks.increment(-1) else: if not partial: - log.ODM_INFO("LRE: %s finished successfully" % task) + log.INFO("LRE: %s finished successfully" % task) finished_tasks.increment() if not local: remote_running_tasks.increment(-1) @@ -184,7 +184,7 @@ def remote_worker(): # Yield to local processing if not nonloc.local_processing: - log.ODM_INFO("LRE: Yielding to local processing, sending %s back to the queue" % task) + log.INFO("LRE: Yielding to local processing, sending %s back to the queue" % task) q.put(task) q.task_done() time.sleep(0.05) @@ -222,7 +222,7 @@ def remote_worker(): while finished_tasks.value < len(self.project_paths) and nonloc.error is None: time.sleep(0.5) except KeyboardInterrupt: - log.ODM_WARNING("LRE: CTRL+C") + log.WARNING("LRE: CTRL+C") system.exit_gracefully() # stop workers @@ -268,7 +268,7 @@ def process(self, local, done): def handle_result(error = None, partial=False): done(self, local, error, partial) - log.ODM_INFO("LRE: About to process %s %s" % (self, 'locally' if local else 'remotely')) + log.INFO("LRE: About to process %s %s" % (self, 'locally' if local else 'remotely')) if local: self._process_local(handle_result) # Block until complete @@ -276,7 +276,7 @@ def handle_result(error = None, partial=False): now = datetime.datetime.now() if self.wait_until > now: wait_for = (self.wait_until - now).seconds + 1 - log.ODM_INFO("LRE: Waiting %s seconds before processing %s" % (wait_for, self)) + log.INFO("LRE: Waiting %s seconds before processing %s" % (wait_for, self)) 
time.sleep(wait_for) # TODO: we could consider uploading multiple tasks @@ -352,7 +352,7 @@ class nonloc: def print_progress(percentage): if (time.time() - nonloc.last_update >= 2) or int(percentage) == 100: - log.ODM_INFO("LRE: Upload of %s at [%s%%]" % (self, int(percentage))) + log.INFO("LRE: Upload of %s at [%s%%]" % (self, int(percentage))) nonloc.last_update = time.time() # Upload task @@ -381,24 +381,24 @@ def status_callback(info): # If a task switches from RUNNING to QUEUED, then we need to # stop the process and re-add the task to the queue. if info.status == TaskStatus.QUEUED: - log.ODM_WARNING("LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue." % (self, task.uuid)) + log.WARNING("LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue." % (self, task.uuid)) raise NodeTaskLimitReachedException("Delayed task limit reached") elif info.status == TaskStatus.RUNNING: # Print a status message once in a while nonloc.status_callback_calls += 1 if nonloc.status_callback_calls > 30: - log.ODM_INFO("LRE: %s (%s) is still running" % (self, task.uuid)) + log.INFO("LRE: %s (%s) is still running" % (self, task.uuid)) nonloc.status_callback_calls = 0 try: def print_progress(percentage): if (time.time() - nonloc.last_update >= 2) or int(percentage) == 100: - log.ODM_INFO("LRE: Download of %s at [%s%%]" % (self, int(percentage))) + log.INFO("LRE: Download of %s at [%s%%]" % (self, int(percentage))) nonloc.last_update = time.time() task.wait_for_completion(status_callback=status_callback) - log.ODM_INFO("LRE: Downloading assets for %s" % self) + log.INFO("LRE: Downloading assets for %s" % self) task.download_assets(self.project_path, progress_callback=print_progress) - log.ODM_INFO("LRE: Downloaded and extracted assets for %s" % self) + log.INFO("LRE: Downloaded and extracted assets for %s" % self) done() except exceptions.TaskFailedError as e: # Try to get output @@ -413,7 +413,7 @@ def print_progress(percentage): msg = "(%s) 
failed with task output: %s\nFull log saved at %s" % (task.uuid, "\n".join(output_lines[-10:]), error_log_path) done(exceptions.TaskFailedError(msg)) except: - log.ODM_WARNING("LRE: Could not retrieve task output for %s (%s)" % (self, task.uuid)) + log.WARNING("LRE: Could not retrieve task output for %s (%s)" % (self, task.uuid)) done(e) except Exception as e: done(e) @@ -441,9 +441,9 @@ def __str__(self): class ReconstructionTask(Task): def process_local(self): octx = OSFMContext(self.path("opensfm")) - log.ODM_INFO("==================================") - log.ODM_INFO("Local Reconstruction %s" % octx.name()) - log.ODM_INFO("==================================") + log.INFO("==================================") + log.INFO("Local Reconstruction %s" % octx.name()) + log.INFO("==================================") octx.feature_matching(self.params['rerun']) octx.create_tracks(self.params['rerun']) octx.reconstruct(self.params['rolling_shutter'], True, self.params['rerun']) @@ -460,7 +460,7 @@ def process_remote(self, done): "opensfm/tracks.csv", "cameras.json"]) else: - log.ODM_INFO("Already processed feature matching and reconstruction for %s" % octx.name()) + log.INFO("Already processed feature matching and reconstruction for %s" % octx.name()) done() class ToolchainTask(Task): @@ -469,9 +469,9 @@ def process_local(self): submodel_name = os.path.basename(self.project_path) if not os.path.exists(completed_file) or self.params['rerun']: - log.ODM_INFO("=============================") - log.ODM_INFO("Local Toolchain %s" % self) - log.ODM_INFO("=============================") + log.INFO("=============================") + log.INFO("Local Toolchain %s" % self) + log.INFO("=============================") submodels_path = os.path.abspath(self.path("..")) argv = get_submodel_argv(config.config(), submodels_path, submodel_name) @@ -482,7 +482,7 @@ def process_local(self): # This will only get executed if the command above succeeds self.touch(completed_file) else: - 
log.ODM_INFO("Already processed toolchain for %s" % submodel_name) + log.INFO("Already processed toolchain for %s" % submodel_name) def process_remote(self, done): completed_file = self.path("toolchain_completed.txt") @@ -509,5 +509,5 @@ def handle_result(error = None): "odm_report", "odm_georeferencing"]) else: - log.ODM_INFO("Already processed toolchain for %s" % submodel_name) + log.INFO("Already processed toolchain for %s" % submodel_name) handle_result() diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py index 9fcb0df8..eca73cd6 100644 --- a/opendm/rollingshutter.py +++ b/opendm/rollingshutter.py @@ -83,17 +83,17 @@ def get_rolling_shutter_readout(photo, override_value=0): elif callable(rsd): val = float(rsd(photo)) else: - log.ODM_WARNING("Invalid rolling shutter calibration entry, returning default of %sms" % DEFAULT_RS_READOUT) + log.WARNING("Invalid rolling shutter calibration entry, returning default of %sms" % DEFAULT_RS_READOUT) if not key in info_db_found: - log.ODM_INFO("Rolling shutter profile for \"%s %s\" selected, using %sms as --rolling-shutter-readout." % (make, model, val)) + log.INFO("Rolling shutter profile for \"%s %s\" selected, using %sms as --rolling-shutter-readout." % (make, model, val)) info_db_found[key] = True return val else: # Warn once if not key in warn_db_missing: - log.ODM_WARNING("Rolling shutter readout time for \"%s %s\" is not in our database, using default of %sms which might be incorrect. Use --rolling-shutter-readout to set an actual value (see https://github.com/WebODM/RSCalibration for instructions on how to calculate this value)" % (make, model, DEFAULT_RS_READOUT)) + log.WARNING("Rolling shutter readout time for \"%s %s\" is not in our database, using default of %sms which might be incorrect. 
Use --rolling-shutter-readout to set an actual value (see https://github.com/WebODM/RSCalibration for instructions on how to calculate this value)" % (make, model, DEFAULT_RS_READOUT)) warn_db_missing[key] = True return float(DEFAULT_RS_READOUT) diff --git a/opendm/skyremoval/skyfilter.py b/opendm/skyremoval/skyfilter.py index bee0dce9..6a79e6d0 100644 --- a/opendm/skyremoval/skyfilter.py +++ b/opendm/skyremoval/skyfilter.py @@ -21,12 +21,12 @@ def __init__(self, model, width = 384, height = 384): self.model = model self.width, self.height = width, height - log.ODM_INFO(' ?> Using provider %s' % provider) + log.INFO(' ?> Using provider %s' % provider) self.load_model() def load_model(self): - log.ODM_INFO(' -> Loading the model') + log.INFO(' -> Loading the model') self.session = ort.InferenceSession(self.model, providers=[provider]) diff --git a/opendm/system.py b/opendm/system.py index 18be2129..eaf27c8d 100644 --- a/opendm/system.py +++ b/opendm/system.py @@ -40,19 +40,19 @@ def remove_cleanup_callback(func): try: cleanup_callbacks.remove(func) except ValueError as e: - log.ODM_EXCEPTION("Tried to remove %s from cleanup_callbacks but got: %s" % (str(func), str(e))) + log.EXCEPTION("Tried to remove %s from cleanup_callbacks but got: %s" % (str(func), str(e))) def exit_gracefully(): global running_subprocesses global cleanup_callbacks - log.ODM_WARNING("Caught TERM/INT signal, attempting to exit gracefully...") + log.WARNING("Caught TERM/INT signal, attempting to exit gracefully...") for cb in cleanup_callbacks: cb() for sp in running_subprocesses: - log.ODM_WARNING("Sending TERM signal to PID %s..." % sp.pid) + log.WARNING("Sending TERM signal to PID %s..." 
% sp.pid) if sys.platform == 'win32': os.kill(sp.pid, signal.CTRL_C_EVENT) else: @@ -71,7 +71,7 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_path global running_subprocesses if not quiet: - log.ODM_INFO('running %s' % cmd) + log.INFO('running %s' % cmd) env = os.environ.copy() sep = ":" diff --git a/opendm/thermal.py b/opendm/thermal.py index fd835a85..659faf9e 100644 --- a/opendm/thermal.py +++ b/opendm/thermal.py @@ -60,11 +60,11 @@ def dn_to_temperature(photo, image, images_path): image = sensor_vals_to_temp(image, **params) image = image.astype("float32") except Exception as e: - log.ODM_WARNING("Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e))) + log.WARNING("Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e))) return image else: image = image.astype("float32") - log.ODM_WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename) + log.WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename) return image diff --git a/opendm/thermal_tools/dji_unpack.py b/opendm/thermal_tools/dji_unpack.py index eb706a4c..4ebf19db 100644 --- a/opendm/thermal_tools/dji_unpack.py +++ b/opendm/thermal_tools/dji_unpack.py @@ -39,9 +39,9 @@ def extract_temperatures_dji(photo, image, dataset_tree): try: img = Image.frombytes("I;16L", (640, 512), a) except ValueError as e: - log.ODM_ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e) + log.ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e) else: - log.ODM_WARNING("Only DJI M2EA currently supported, please wait for new updates") + log.WARNING("Only DJI M2EA currently supported, please wait for new updates") return image # Extract raw sensor values from generated image into numpy array raw_sensor_np = np.array(img) diff --git a/opendm/tiles/tiler.py b/opendm/tiles/tiler.py index 
b29742ca..e3a8954a 100644 --- a/opendm/tiles/tiler.py +++ b/opendm/tiles/tiler.py @@ -19,7 +19,7 @@ def generate_tiles(geotiff, output_dir, max_concurrency, resolution): if os.path.isdir(output_dir): shutil.rmtree(output_dir) - log.ODM_INFO("Generating static tiles for %s" % geotiff) + log.INFO("Generating static tiles for %s" % geotiff) with StaticTiler(geotiff, output_dir, px_per_tile, tms=True) as tiler: for z in range(z_min, z_max + 1): tiles = tiler.get_tiles_for_zoom(z) @@ -30,7 +30,7 @@ def generate_orthophoto_tiles(geotiff, output_dir, max_concurrency, resolution): try: generate_tiles(geotiff, output_dir, max_concurrency, resolution) except Exception as e: - log.ODM_WARNING("Cannot generate orthophoto tiles: %s" % str(e)) + log.WARNING("Cannot generate orthophoto tiles: %s" % str(e)) def generate_colored_hillshade(geotiff): relief_file = os.path.join(os.path.dirname(__file__), "color_relief.txt") @@ -52,7 +52,7 @@ def generate_colored_hillshade(geotiff): return outputs except Exception as e: - log.ODM_WARNING("Cannot generate colored hillshade: %s" % str(e)) + log.WARNING("Cannot generate colored hillshade: %s" % str(e)) return (None, None, None) def generate_dem_tiles(geotiff, output_dir, max_concurrency, resolution): @@ -65,4 +65,4 @@ def generate_dem_tiles(geotiff, output_dir, max_concurrency, resolution): if os.path.isfile(f): os.remove(f) except Exception as e: - log.ODM_WARNING("Cannot generate DEM tiles: %s" % str(e)) + log.WARNING("Cannot generate DEM tiles: %s" % str(e)) diff --git a/opendm/types.py b/opendm/types.py index 9e1e0c8b..f683c52e 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -62,7 +62,7 @@ def detect_multi_camera(self): filter_missing = False for band in band_photos: if len(band_photos[band]) < img_per_band: - log.ODM_WARNING("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted." 
% (band, band_photos[band][0].filename, len(band_photos[band]), len(band_photos[max_band_name]))) + log.WARNING("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted." % (band, band_photos[band][0].filename, len(band_photos[band]), len(band_photos[max_band_name]))) filter_missing = True if filter_missing: @@ -78,13 +78,13 @@ def detect_multi_camera(self): if len(p2s[filename]) < max_files_per_band: photos_to_remove = p2s[filename] + [p for p in self.photos if p.filename == filename] for photo in photos_to_remove: - log.ODM_WARNING("Excluding %s" % photo.filename) + log.WARNING("Excluding %s" % photo.filename) self.photos = [p for p in self.photos if p != photo] for i in range(len(mc)): mc[i]['photos'] = [p for p in mc[i]['photos'] if p != photo] - log.ODM_INFO("New image count: %s" % len(self.photos)) + log.INFO("New image count: %s" % len(self.photos)) # We enforce a normalized band order for all bands that we can identify # and rely on the manufacturer's band_indexes as a fallback for all others @@ -115,13 +115,13 @@ def detect_multi_camera(self): for band_name in band_indexes: if band_name.upper() not in normalized_band_order: - log.ODM_WARNING(f"Cannot identify order for {band_name} band, using manufacturer suggested index instead") + log.WARNING(f"Cannot identify order for {band_name} band, using manufacturer suggested index instead") # Sort mc.sort(key=lambda x: normalized_band_order.get(x['name'].upper(), '9' + band_indexes[x['name']])) for c, d in enumerate(mc): - log.ODM_INFO(f"Band {c + 1}: {d['name']}") + log.INFO(f"Band {c + 1}: {d['name']}") return mc @@ -156,7 +156,7 @@ def filter_photos(self): bands_to_remove.append(bands[b]) if len(bands_to_remove) > 0: - log.ODM_WARNING("Redundant bands detected, probably because RGB images are mixed with single band images. 
We will trim some bands as needed") + log.WARNING("Redundant bands detected, probably because RGB images are mixed with single band images. We will trim some bands as needed") for band_to_remove in bands_to_remove: self.multi_camera = [b for b in self.multi_camera if b['name'] != band_to_remove] @@ -164,7 +164,7 @@ def filter_photos(self): self.photos = [p for p in self.photos if p.band_name != band_to_remove] photos_after = len(self.photos) - log.ODM_WARNING("Skipping %s band (%s images)" % (band_to_remove, photos_before - photos_after)) + log.WARNING("Skipping %s band (%s images)" % (band_to_remove, photos_before - photos_after)) def is_georeferenced(self): return self.georef is not None @@ -197,10 +197,10 @@ def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, o raise RuntimeError("Could not project GCP file to UTM. Please double check your GCP file for mistakes.") for re in rejected_entries: - log.ODM_WARNING("GCP line ignored (image not found): %s" % str(re)) + log.WARNING("GCP line ignored (image not found): %s" % str(re)) if utm_gcp.entries_count() > 0: - log.ODM_INFO("%s GCP points will be used for georeferencing" % utm_gcp.entries_count()) + log.INFO("%s GCP points will be used for georeferencing" % utm_gcp.entries_count()) else: raise RuntimeError("A GCP file was provided, but no valid GCP entries could be used. 
Note that the GCP file is case sensitive (\".JPG\" is not the same as \".jpg\").") @@ -217,18 +217,18 @@ def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, o coords_header = gcp.wgs84_utm_zone() f.write(coords_header + "\n") f.write("{} {}\n".format(x_off, y_off)) - log.ODM_INFO("Generated coords file from GCP: %s" % coords_header) + log.INFO("Generated coords file from GCP: %s" % coords_header) # Deprecated: This is mostly for backward compatibility and should be # be removed at some point shutil.copyfile(output_coords_file, output_model_txt_geo) - log.ODM_INFO("Wrote %s" % output_model_txt_geo) + log.INFO("Wrote %s" % output_model_txt_geo) else: - log.ODM_WARNING("GCP file does not exist: %s" % gcp_file) + log.WARNING("GCP file does not exist: %s" % gcp_file) return else: - log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file) - log.ODM_INFO("GCP file already exist: %s" % output_gcp_file) + log.INFO("Coordinates file already exist: %s" % output_coords_file) + log.INFO("GCP file already exist: %s" % output_gcp_file) self.gcp = GCPFile(output_gcp_file) self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file) @@ -239,7 +239,7 @@ def georeference_with_gps(self, images_path, output_coords_file, output_model_tx if not io.file_exists(output_coords_file) or rerun: location.extract_utm_coords(self.photos, images_path, output_coords_file) else: - log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file) + log.INFO("Coordinates file already exist: %s" % output_coords_file) # Deprecated: This is mostly for backward compatibility and should be # be removed at some point @@ -249,11 +249,11 @@ def georeference_with_gps(self, images_path, output_coords_file, output_model_tx w.write(f.readline()) # CRS w.write(f.readline()) # Offset else: - log.ODM_INFO("Model geo file already exist: %s" % output_model_txt_geo) + log.INFO("Model geo file already exist: %s" % output_model_txt_geo) self.georef = 
ODM_GeoRef.FromCoordsFile(output_coords_file) except: - log.ODM_WARNING('Could not generate coordinates file. The orthophoto will not be georeferenced.') + log.WARNING('Could not generate coordinates file. The orthophoto will not be georeferenced.') self.gcp = GCPFile(None) return self.georef @@ -285,7 +285,7 @@ class ODM_GeoRef(object): def FromCoordsFile(coords_file): # check for coordinate file existence if not io.file_exists(coords_file): - log.ODM_WARNING('Could not find file %s' % coords_file) + log.WARNING('Could not find file %s' % coords_file) return srs = None @@ -439,7 +439,7 @@ def run(self, outputs = {}): start_time = system.now_raw() log.logger.log_json_stage_run(self.name, start_time) - log.ODM_INFO('Running %s stage' % self.name) + log.INFO('Running %s stage' % self.name) self.process(self.args, outputs) @@ -450,14 +450,14 @@ def run(self, outputs = {}): try: system.benchmark(start_time, outputs['tree'].benchmarking, self.name) except Exception as e: - log.ODM_WARNING("Cannot write benchmark file: %s" % str(e)) + log.WARNING("Cannot write benchmark file: %s" % str(e)) - log.ODM_INFO('Finished %s stage' % self.name) + log.INFO('Finished %s stage' % self.name) self.update_progress_end() # Last stage? if self.args.end_with == self.name or self.args.rerun == self.name: - log.ODM_INFO("No more stages to run") + log.INFO("No more stages to run") return # Run next stage? 
diff --git a/opendm/utils.py b/opendm/utils.py index b73086db..f2112111 100644 --- a/opendm/utils.py +++ b/opendm/utils.py @@ -43,7 +43,7 @@ def get_depthmap_resolution(args, photos): return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier)) else: - log.ODM_WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640") + log.WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640") return 640 # Sensible default def get_raster_stats(geotiff): @@ -94,15 +94,15 @@ def copy_paths(paths, destination, rerun): elif os.path.isdir(dst_path): shutil.rmtree(dst_path) except Exception as e: - log.ODM_WARNING("Cannot remove file %s: %s, skipping..." % (dst_path, str(e))) + log.WARNING("Cannot remove file %s: %s, skipping..." % (dst_path, str(e))) if not os.path.exists(dst_path): if os.path.isfile(p): - log.ODM_INFO("Copying %s --> %s" % (p, dst_path)) + log.INFO("Copying %s --> %s" % (p, dst_path)) shutil.copy(p, dst_path) elif os.path.isdir(p): shutil.copytree(p, dst_path) - log.ODM_INFO("Copying %s --> %s" % (p, dst_path)) + log.INFO("Copying %s --> %s" % (p, dst_path)) def rm_r(path): try: @@ -111,7 +111,7 @@ def rm_r(path): elif os.path.exists(path): os.remove(path) except: - log.ODM_WARNING("Cannot remove %s" % path) + log.WARNING("Cannot remove %s" % path) def np_to_json(arr): return json.dumps(arr, cls=NumpyEncoder) @@ -127,11 +127,11 @@ def add_raster_meta_tags(raster, reconstruction, tree, embed_gcp_meta=True): if mean_capture_time is not None: mean_capture_dt = datetime.fromtimestamp(mean_capture_time).strftime('%Y:%m:%d %H:%M:%S') + '+00:00' - log.ODM_INFO("Adding TIFFTAGs to {}".format(raster)) + log.INFO("Adding TIFFTAGs to {}".format(raster)) with rasterio.open(raster, 'r+') as rst: if mean_capture_dt is not None: rst.update_tags(TIFFTAG_DATETIME=mean_capture_dt) - rst.update_tags(TIFFTAG_SOFTWARE='ODX {}'.format(log.odm_version())) + rst.update_tags(TIFFTAG_SOFTWARE='ODX 
{}'.format(log.get_version())) if embed_gcp_meta: # Embed GCP info in 2D results via @@ -149,10 +149,10 @@ def add_raster_meta_tags(raster, reconstruction, tree, embed_gcp_meta=True): if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun(): ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS') ds = None - log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % raster) + log.INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % raster) else: - log.ODM_WARNING("Already embedded ground control point information") + log.WARNING("Already embedded ground control point information") else: - log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % raster) + log.WARNING("Cannot open %s for writing, skipping GCP embedding" % raster) except Exception as e: - log.ODM_WARNING("Cannot write raster meta tags to %s: %s" % (raster, str(e))) + log.WARNING("Cannot write raster meta tags to %s: %s" % (raster, str(e))) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index 75e82513..8c22eb3b 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -23,7 +23,7 @@ def match_single(regexes, line, dtype=int): res = match.group(1) return transform(res) except Exception as e: - log.ODM_WARNING("Cannot parse SRT line \"%s\": %s", (line, str(e))) + log.WARNING("Cannot parse SRT line \"%s\": %s", (line, str(e))) return None diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 50f87eee..2d3fdfae 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -43,11 +43,11 @@ def ProcessVideo(self): for input_file in self.parameters.input: # get file name file_name = os.path.basename(input_file) - log.ODM_INFO("Processing video: {}".format(input_file)) + log.INFO("Processing video: {}".format(input_file)) # get video info video_info = get_video_info(input_file) - log.ODM_INFO(video_info) + log.INFO(video_info) # Set pseudo start time if self.date_now is None: @@ -58,7 +58,7 @@ 
def ProcessVideo(self): else: self.date_now += datetime.timedelta(seconds=video_info.total_frames / video_info.frame_rate) - log.ODM_INFO("Use pseudo start time: %s" % self.date_now) + log.INFO("Use pseudo start time: %s" % self.date_now) if self.parameters.use_srt: @@ -69,31 +69,31 @@ def ProcessVideo(self): for srt_file in srt_files: if os.path.exists(srt_file): - log.ODM_INFO("Loading SRT file: {}".format(srt_file)) + log.INFO("Loading SRT file: {}".format(srt_file)) try: srt_parser = SrtFileParser(srt_file) srt_parser.parse() break except Exception as e: - log.ODM_INFO("Error parsing SRT file: {}".format(e)) + log.INFO("Error parsing SRT file: {}".format(e)) srt_parser = None else: srt_parser = None if (self.black_checker is not None and self.black_checker.NeedPreProcess()): start2 = time.time() - log.ODM_INFO("Preprocessing for black frame checker... this might take a bit") + log.INFO("Preprocessing for black frame checker... this might take a bit") self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end) end = time.time() - log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start2)) - log.ODM_INFO("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size)) - log.ODM_INFO("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value)) - log.ODM_INFO("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold)) + log.INFO("Preprocessing time: {:.2f}s".format(end - start2)) + log.INFO("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size)) + log.INFO("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value)) + log.INFO("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold)) # open video file cap = cv2.VideoCapture(input_file) if (not cap.isOpened()): - log.ODM_INFO("Error opening video stream or file") + log.INFO("Error opening video stream or file") 
return if (self.parameters.start is not None): @@ -136,11 +136,11 @@ def ProcessVideo(self): self.f.close() if self.parameters.limit is not None and self.parameters.limit > 0 and self.global_idx >= self.parameters.limit: - log.ODM_INFO("Limit of {} frames reached, trimming dataset".format(self.parameters.limit)) + log.INFO("Limit of {} frames reached, trimming dataset".format(self.parameters.limit)) output_file_paths = limit_files(output_file_paths, self.parameters.limit) end = time.time() - log.ODM_INFO("Total processing time: {:.2f}s".format(end - start)) + log.INFO("Total processing time: {:.2f}s".format(end - start)) return output_file_paths diff --git a/run.py b/run.py index 9d4f870d..52b7f831 100755 --- a/run.py +++ b/run.py @@ -22,32 +22,32 @@ if __name__ == '__main__': args = config.config() - log.ODM_INFO('Initializing ODX %s - %s' % (log.odm_version(), system.now())) + log.INFO('Initializing ODX %s - %s' % (log.get_version(), system.now())) progressbc.set_project_name(args.name) args.project_path = os.path.join(args.project_path, args.name) if not io.dir_exists(args.project_path): - log.ODM_ERROR('Directory %s does not exist.' % args.name) + log.ERROR('Directory %s does not exist.' 
% args.name) exit(1) opts_json = os.path.join(args.project_path, "options.json") auto_rerun_stage, opts_diff = find_rerun_stage(opts_json, args, config.rerun_stages, config.processopts) if auto_rerun_stage is not None and len(auto_rerun_stage) > 0: - log.ODM_INFO("Rerunning from: %s" % auto_rerun_stage[0]) + log.INFO("Rerunning from: %s" % auto_rerun_stage[0]) args.rerun_from = auto_rerun_stage # Print args args_dict = args_to_dict(args) - log.ODM_INFO('==============') + log.INFO('==============') for k in args_dict.keys(): - log.ODM_INFO('%s: %s%s' % (k, args_dict[k], ' [changed]' if k in opts_diff else '')) - log.ODM_INFO('==============') + log.INFO('%s: %s%s' % (k, args_dict[k], ' [changed]' if k in opts_diff else '')) + log.INFO('==============') # If user asks to rerun everything, delete all of the existing progress directories. if args.rerun_all: - log.ODM_INFO("Rerun all -- Removing old data") + log.INFO("Rerun all -- Removing old data") for d in [os.path.join(args.project_path, p) for p in get_processing_results_paths()] + [ os.path.join(args.project_path, "odm_meshing"), os.path.join(args.project_path, "opensfm"), @@ -64,6 +64,6 @@ # Do not show end message for local submodels runs if retcode == 0 and not "submodels" in args.project_path: - log.ODM_INFO('ODX app finished - %s' % system.now()) + log.INFO('ODX app finished - %s' % system.now()) else: exit(retcode) \ No newline at end of file diff --git a/stages/dataset.py b/stages/dataset.py index a6920533..9f3979b8 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -21,7 +21,7 @@ def save_images_database(photos, database_file): with open(database_file, 'w') as f: f.write(json.dumps([p.__dict__ for p in photos])) - log.ODM_INFO("Wrote images database: %s" % database_file) + log.INFO("Wrote images database: %s" % database_file) def load_images_database(database_file): # Empty is used to create types.ODM_Photo class @@ -31,7 +31,7 @@ class Empty: result = [] - log.ODM_INFO("Loading images database: 
%s" % database_file) + log.INFO("Loading images database: %s" % database_file) with open(database_file, 'r') as f: photos_json = json.load(f) @@ -57,7 +57,7 @@ def process(self, args, outputs): with open(tree.benchmarking, 'a') as b: b.write('ODX Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores)) except Exception as e: - log.ODM_WARNING("Cannot write benchmark file: %s" % str(e)) + log.WARNING("Cannot write benchmark file: %s" % str(e)) def valid_filename(filename, supported_extensions): (pathfn, ext) = os.path.splitext(filename) @@ -88,7 +88,7 @@ def find_mask(photo_path, masks): if not " " in mask: return mask else: - log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask)) + log.WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask)) @@ -98,7 +98,7 @@ def find_mask(photo_path, masks): # define paths and create working directories system.mkdir_p(tree.odm_georeferencing) - log.ODM_INFO('Loading dataset from: %s' % images_dir) + log.INFO('Loading dataset from: %s' % images_dir) # check if we rerun cell or not images_database_file = os.path.join(tree.root_path, 'images.json') @@ -114,13 +114,13 @@ def find_mask(photo_path, masks): # If we're re-running the pipeline, and frames have been extracted during a previous run # we need to remove those before re-extracting them if len(video_files) > 0 and os.path.exists(frames_db_file) and self.rerun(): - log.ODM_INFO("Re-run, removing previously extracted video frames") + log.INFO("Re-run, removing previously extracted video frames") frames = [] try: with open(frames_db_file, 'r') as f: frames = json.loads(f.read()) except Exception as e: - log.ODM_WARNING("Cannot check previous video extraction: %s" % str(e)) + log.WARNING("Cannot check previous video extraction: %s" % str(e)) for f in frames: fp = os.path.join(images_dir, f) @@ -128,7 +128,7 @@ def find_mask(photo_path, masks): 
os.remove(fp) if len(video_files) > 0: - log.ODM_INFO("Found video files (%s), extracting frames" % len(video_files)) + log.INFO("Found video files (%s), extracting frames" % len(video_files)) try: params = Parameters({ @@ -149,7 +149,7 @@ def find_mask(photo_path, masks): with open(frames_db_file, 'w') as f: f.write(json.dumps([os.path.basename(f) for f in frames])) except Exception as e: - log.ODM_WARNING("Could not extract video frames: %s" % str(e)) + log.WARNING("Could not extract video frames: %s" % str(e)) files, rejects = get_images(images_dir) if files: @@ -165,7 +165,7 @@ def find_mask(photo_path, masks): photos = [] with open(tree.dataset_list, 'w') as dataset_list: - log.ODM_INFO("Loading %s images" % len(path_files)) + log.INFO("Loading %s images" % len(path_files)) for f in path_files: try: p = types.ODM_Photo(f) @@ -173,11 +173,11 @@ def find_mask(photo_path, masks): photos.append(p) dataset_list.write(photos[-1].filename + '\n') except PhotoCorruptedException: - log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f)) + log.WARNING("%s seems corrupted and will not be used" % os.path.basename(f)) # Check if a geo file is available if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file): - log.ODM_INFO("Found image geolocation file") + log.INFO("Found image geolocation file") gf = GeoFile(tree.odm_geo_file) updated = 0 for p in photos: @@ -186,21 +186,21 @@ def find_mask(photo_path, masks): p.update_with_geo_entry(entry) p.compute_opk() updated += 1 - log.ODM_INFO("Updated %s image positions" % updated) + log.INFO("Updated %s image positions" % updated) # Warn if a file path is specified but it does not exist elif tree.odm_geo_file is not None and not os.path.isfile(tree.odm_geo_file): - log.ODM_WARNING("Image geolocation file %s does not exist" % tree.odm_geo_file) + log.WARNING("Image geolocation file %s does not exist" % tree.odm_geo_file) # GPSDOP override if we have GPS accuracy information (such as RTK) if 
'gps_accuracy_is_set' in args: - log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy) + log.INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy) for p in photos: p.override_gps_dop(args.gps_accuracy) # Override projection type if args.camera_lens != "auto": - log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens) + log.INFO("Setting camera lens to %s for all images" % args.camera_lens) for p in photos: p.override_camera_projection(args.camera_lens) @@ -220,7 +220,7 @@ def find_mask(photo_path, masks): sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p}) if len(sky_images) > 0: - log.ODM_INFO("Automatically generating sky masks for %s images" % len(sky_images)) + log.INFO("Automatically generating sky masks for %s images" % len(sky_images)) model = ai.get_model("skyremoval", "https://github.com/WebODM/ODX/releases/download/v3.7.1/skyremoval.zip", "v1.0.5") if model is not None: sf = SkyFilter(model=model) @@ -232,19 +232,19 @@ def parallel_sky_filter(item): # Check and set if mask_file is not None and os.path.isfile(mask_file): item['p'].set_mask(os.path.basename(mask_file)) - log.ODM_INFO("Wrote %s" % os.path.basename(mask_file)) + log.INFO("Wrote %s" % os.path.basename(mask_file)) else: - log.ODM_WARNING("Cannot generate mask for %s" % item['file']) + log.WARNING("Cannot generate mask for %s" % item['file']) except Exception as e: - log.ODM_WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e))) + log.WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e))) parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency) - log.ODM_INFO("Sky masks generation completed!") + log.INFO("Sky masks generation completed!") else: - log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)") + log.WARNING("Cannot load AI model (you might need to be connected to the internet?)") else: - log.ODM_INFO("No sky masks will be generated 
(masks already provided, or images are nadir)") + log.INFO("No sky masks will be generated (masks already provided, or images are nadir)") # End sky removal @@ -261,7 +261,7 @@ def parallel_sky_filter(item): bg_images.append({'file': os.path.join(images_dir, p.filename), 'p': p}) if len(bg_images) > 0: - log.ODM_INFO("Automatically generating background masks for %s images" % len(bg_images)) + log.INFO("Automatically generating background masks for %s images" % len(bg_images)) model = ai.get_model("bgremoval", "https://github.com/WebODM/ODX/releases/download/v3.7.1/u2net.zip", "v2.9.0") if model is not None: bg = BgFilter(model=model) @@ -273,19 +273,19 @@ def parallel_bg_filter(item): # Check and set if mask_file is not None and os.path.isfile(mask_file): item['p'].set_mask(os.path.basename(mask_file)) - log.ODM_INFO("Wrote %s" % os.path.basename(mask_file)) + log.INFO("Wrote %s" % os.path.basename(mask_file)) else: - log.ODM_WARNING("Cannot generate mask for %s" % img) + log.WARNING("Cannot generate mask for %s" % img) except Exception as e: - log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e))) + log.WARNING("Cannot generate mask for %s: %s" % (img, str(e))) parallel_map(parallel_bg_filter, bg_images, max_workers=args.max_concurrency) - log.ODM_INFO("Background masks generation completed!") + log.INFO("Background masks generation completed!") else: - log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)") + log.WARNING("Cannot load AI model (you might need to be connected to the internet?)") else: - log.ODM_INFO("No background masks will be generated (masks already provided)") + log.INFO("No background masks will be generated (masks already provided)") # End bg removal @@ -297,7 +297,7 @@ def parallel_bg_filter(item): # We have an images database, just load it photos = load_images_database(images_database_file) - log.ODM_INFO('Found %s usable images' % len(photos)) + log.INFO('Found %s usable images' % len(photos)) 
log.logger.log_json_images(len(photos)) # Create reconstruction object @@ -310,7 +310,7 @@ def parallel_bg_filter(item): tree.odm_georeferencing_model_txt_geo, rerun=self.rerun()) if reconstruction.gcp is not None and reconstruction.gcp.only_checkpoints(): - log.ODM_WARNING("Only checkpoints in this GCP file. Enabling --force-gps") + log.WARNING("Only checkpoints in this GCP file. Enabling --force-gps") args.force_gps = True else: reconstruction.georeference_with_gps(tree.dataset_raw, @@ -327,24 +327,24 @@ def parallel_bg_filter(item): outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs()) else: args.boundary = None - log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)") + log.WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)") # If sfm-algorithm is triangulation, check if photos have OPK if args.sfm_algorithm == 'triangulation': for p in photos: if not p.has_opk(): - log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename) + log.WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename) args.sfm_algorithm = 'incremental' break # Rolling shutter cannot be done in non-georeferenced datasets if args.rolling_shutter and not reconstruction.is_georeferenced(): - log.ODM_WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction") + log.WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction") args.rolling_shutter = False # GPS Z offset if 'gps_z_offset_is_set' in args: - log.ODM_INFO("Adjusting GPS Z offset by %s for all images" % args.gps_z_offset) + log.INFO("Adjusting GPS Z offset by %s for all images" % args.gps_z_offset) for p in photos: p.adjust_z_offset(args.gps_z_offset) diff --git a/stages/mvstex.py b/stages/mvstex.py index b0006ebd..de2045e3 
100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -18,7 +18,7 @@ def process(self, args, outputs): max_texture_size = 8 * 1024 # default if max_dim > 8000: - log.ODM_INFO("Large input images (%s pixels), increasing maximum texture size." % max_dim) + log.INFO("Large input images (%s pixels), increasing maximum texture size." % max_dim) max_texture_size *= 3 class nonloc: @@ -72,7 +72,7 @@ def add_run(nvm_file, primary=True, band=None): unaligned_obj = io.related_file_path(odm_textured_model_obj, postfix="_unaligned") if not io.file_exists(odm_textured_model_obj) or self.rerun(): - log.ODM_INFO('Writing MVS Textured file in: %s' + log.INFO('Writing MVS Textured file in: %s' % odm_textured_model_obj) if os.path.isfile(unaligned_obj): @@ -112,7 +112,7 @@ def add_run(nvm_file, primary=True, band=None): # mvstex creates a tmp directory, so make sure it is empty if io.dir_exists(mvs_tmp_dir): - log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir)) + log.INFO("Removing old tmp directory {}".format(mvs_tmp_dir)) shutil.rmtree(mvs_tmp_dir) # run texturing binary @@ -130,15 +130,15 @@ def add_run(nvm_file, primary=True, band=None): if r['primary'] and (not r['nadir'] or args.skip_3dmodel): # Single material? 
if args.texturing_single_material: - log.ODM_INFO("Packing to single material") + log.INFO("Packing to single material") packed_dir = os.path.join(r['out_dir'], 'packed') if io.dir_exists(packed_dir): - log.ODM_INFO("Removing old packed directory {}".format(packed_dir)) + log.INFO("Removing old packed directory {}".format(packed_dir)) shutil.rmtree(packed_dir) try: - obj_pack(os.path.join(r['out_dir'], tree.odm_textured_model_obj), packed_dir, _info=log.ODM_INFO) + obj_pack(os.path.join(r['out_dir'], tree.odm_textured_model_obj), packed_dir, _info=log.INFO) # Move packed/* into texturing folder system.delete_files(r['out_dir'], (".vec", )) @@ -146,7 +146,7 @@ def add_run(nvm_file, primary=True, band=None): if os.path.isdir(packed_dir): os.rmdir(packed_dir) except Exception as e: - log.ODM_WARNING(str(e)) + log.WARNING(str(e)) # Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl @@ -161,7 +161,7 @@ def add_run(nvm_file, primary=True, band=None): progress += progress_per_run self.update_progress(progress) else: - log.ODM_WARNING('Found a valid texture file in: %s' + log.WARNING('Found a valid texture file in: %s' % odm_textured_model_obj) if args.optimize_disk_space: diff --git a/stages/odm_app.py b/stages/odm_app.py index 5b745906..899be1ad 100644 --- a/stages/odm_app.py +++ b/stages/odm_app.py @@ -96,21 +96,21 @@ def execute(self): if code == 139 or code == 134 or code == 1 or code == 3221225477: # Segfault - log.ODM_ERROR("Uh oh! Processing stopped because of strange values in the reconstruction. This is often a sign that the input data has some issues or the software cannot deal with it. Have you followed best practices for data acquisition? See https://docs.webodm.org/flying-tips/") + log.ERROR("Uh oh! Processing stopped because of strange values in the reconstruction. This is often a sign that the input data has some issues or the software cannot deal with it. Have you followed best practices for data acquisition? 
See https://docs.webodm.org/flying-tips/") elif code == 137 or code == 3221226505: - log.ODM_ERROR("Whoops! You ran out of memory! Add more RAM to your computer, if you're using docker configure it to use more memory, for WSL2 make use of .wslconfig (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig), resize your images, lower the quality settings or process the images using a cloud provider (e.g. https://webodm.net).") + log.ERROR("Whoops! You ran out of memory! Add more RAM to your computer, if you're using docker configure it to use more memory, for WSL2 make use of .wslconfig (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig), resize your images, lower the quality settings or process the images using a cloud provider (e.g. https://webodm.net).") elif code == 132: - log.ODM_ERROR("Oh no! It looks like your CPU is not supported (is it fairly old?). You can still use ODX, but you will need to build your own docker image. See https://github.com/WebODM/ODX#build-from-source") + log.ERROR("Oh no! It looks like your CPU is not supported (is it fairly old?). You can still use ODX, but you will need to build your own docker image. See https://github.com/WebODM/ODX#build-from-source") elif code == 3: - log.ODM_ERROR("Can't find a program that is required for processing to run! Did you do a custom build of ODX? (cool!) Make sure that all programs required by ODX are in the right place and are built correctly.") + log.ERROR("Can't find a program that is required for processing to run! Did you do a custom build of ODX? (cool!) Make sure that all programs required by ODX are in the right place and are built correctly.") else: - log.ODM_ERROR("The program exited with a strange error code. Please report it") + log.ERROR("The program exited with a strange error code. Please report it") # TODO: more? 
return code except system.ExitException as e: - log.ODM_ERROR(str(e)) + log.ERROR(str(e)) log.logger.log_json_stage_error(str(e), 1, traceback.format_exc()) sys.exit(1) except Exception as e: diff --git a/stages/odm_dem.py b/stages/odm_dem.py index d7a0f643..00790599 100755 --- a/stages/odm_dem.py +++ b/stages/odm_dem.py @@ -26,7 +26,7 @@ def process(self, args, outputs): pseudo_georeference = False if not reconstruction.is_georeferenced(): - log.ODM_WARNING("Not georeferenced, using ungeoreferenced point cloud...") + log.WARNING("Not georeferenced, using ungeoreferenced point cloud...") ignore_resolution = True pseudo_georeference = True @@ -36,9 +36,9 @@ def process(self, args, outputs): ignore_resolution=ignore_resolution and args.ignore_gsd, has_gcp=reconstruction.has_gcp()) - log.ODM_INFO('Create DSM: ' + str(args.dsm)) - log.ODM_INFO('Create DTM: ' + str(args.dtm)) - log.ODM_INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found))) + log.INFO('Create DSM: ' + str(args.dsm)) + log.INFO('Create DTM: ' + str(args.dtm)) + log.INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found))) # define paths and create working directories odm_dem_root = tree.path('odm_dem') @@ -100,6 +100,6 @@ def process(self, args, outputs): progress += 40 self.update_progress(progress) else: - log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root) + log.WARNING('Found existing outputs in: %s' % odm_dem_root) else: - log.ODM_WARNING('DEM will not be generated') + log.WARNING('DEM will not be generated') diff --git a/stages/odm_filterpoints.py b/stages/odm_filterpoints.py index 6823699f..c453af87 100644 --- a/stages/odm_filterpoints.py +++ b/stages/odm_filterpoints.py @@ -41,13 +41,13 @@ def process(self, args, outputs): if boundary_distance is not None: outputs['boundary'] = compute_boundary_from_shots(tree.opensfm_reconstruction, boundary_distance, reconstruction.get_proj_offset()) if outputs['boundary'] is None: - log.ODM_WARNING("Cannot 
compute boundary from camera shots") + log.WARNING("Cannot compute boundary from camera shots") else: - log.ODM_WARNING("Cannot compute boundary (GSD cannot be estimated)") + log.WARNING("Cannot compute boundary (GSD cannot be estimated)") else: - log.ODM_WARNING("--auto-boundary set but so is --boundary, will use --boundary") + log.WARNING("--auto-boundary set but so is --boundary, will use --boundary") else: - log.ODM_WARNING("Not a georeferenced reconstruction, will ignore --auto-boundary") + log.WARNING("Not a georeferenced reconstruction, will ignore --auto-boundary") point_cloud.filter(inputPointCloud, tree.filtered_point_cloud, tree.filtered_point_cloud_stats, standard_deviation=args.pc_filter, @@ -63,7 +63,7 @@ def process(self, args, outputs): extra_msg = '. Also, since you used a boundary setting, make sure that the boundary polygon you specified covers the reconstruction area correctly.' raise system.ExitException("Uh oh! We ended up with an empty point cloud. This means that the reconstruction did not succeed. Have you followed best practices for data acquisition? 
See https://docs.webodm.org/flying-tips/%s" % extra_msg) else: - log.ODM_WARNING('Found a valid point cloud file in: %s' % + log.WARNING('Found a valid point cloud file in: %s' % tree.filtered_point_cloud) if args.optimize_disk_space and inputPointCloud: diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index 93660c2c..1995a3d4 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -83,7 +83,7 @@ def process(self, args, outputs): for feature in src: dst.write(feature) except Exception as e: - log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e)) + log.WARNING("Cannot generate ground control points GML file: %s" % str(e)) # Write GeoJSON geojson = { @@ -115,7 +115,7 @@ def process(self, args, outputs): f.write(gcp_geojson_export_file, arcname=os.path.basename(gcp_geojson_export_file)) else: - log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file) + log.WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file) if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun(): cmd = f'pdal translate -i "{tree.filtered_point_cloud}" -o \"{tree.odm_georeferencing_model_laz}\"' @@ -125,7 +125,7 @@ def process(self, args, outputs): ] if reconstruction.is_georeferenced(): - log.ODM_INFO("Georeferencing point cloud") + log.INFO("Georeferencing point cloud") stages.append("transformation") utmoffset = reconstruction.georef.utm_offset() @@ -144,12 +144,12 @@ def powerr(r): with open(filtered_point_cloud_stats, 'r') as stats: las_stats = json.load(stats) spacing = powerr(las_stats['spacing']) - log.ODM_INFO("las scale calculated as the minimum of 1/10 estimated spacing or %s, which ever is less." % las_scale) + log.INFO("las scale calculated as the minimum of 1/10 estimated spacing or %s, which ever is less." % las_scale) las_scale = min(spacing, 0.001) except Exception as e: - log.ODM_WARNING("Cannot find file point_cloud_stats.json. 
Using default las scale: %s" % las_scale) + log.WARNING("Cannot find file point_cloud_stats.json. Using default las scale: %s" % las_scale) else: - log.ODM_INFO("No point_cloud_stats.json found. Using default las scale: %s" % las_scale) + log.INFO("No point_cloud_stats.json found. Using default las scale: %s" % las_scale) params += [ f'--filters.transformation.matrix="1 0 0 {utmoffset[0]} 0 1 0 {utmoffset[1]} 0 0 1 0 0 0 0 1"', @@ -164,19 +164,19 @@ def powerr(r): if reconstruction.has_gcp() and io.file_exists(gcp_geojson_zip_export_file): if os.path.getsize(gcp_geojson_zip_export_file) <= 65535: - log.ODM_INFO("Embedding GCP info in point cloud") + log.INFO("Embedding GCP info in point cloud") params += [ '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODX\\\", \\\"record_id\\\": 2, \\\"description\\\": \\\"Ground Control Points (zip)\\\"}"' % gcp_geojson_zip_export_file.replace(os.sep, "/") ] else: - log.ODM_WARNING("Cannot embed GCP info in point cloud, %s is too large" % gcp_geojson_zip_export_file) + log.WARNING("Cannot embed GCP info in point cloud, %s is too large" % gcp_geojson_zip_export_file) system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) self.update_progress(50) if args.crop > 0: - log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud") + log.INFO("Calculating cropping area and generating bounds shapefile from point cloud") cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model') if args.fast_orthophoto: @@ -193,18 +193,18 @@ def powerr(r): cropper.create_bounds_gpkg(tree.odm_georeferencing_model_laz, args.crop, decimation_step=decimation_step) except: - log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping") + log.WARNING("Cannot calculate crop bounds! 
We will skip cropping") args.crop = 0 if 'boundary' in outputs and args.crop == 0: - log.ODM_INFO("Using boundary JSON as cropping area") + log.INFO("Using boundary JSON as cropping area") bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz) bounds_json = bounds_base + ".bounds.geojson" bounds_gpkg = bounds_base + ".bounds.gpkg" export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg) else: - log.ODM_INFO("Converting point cloud (non-georeferenced)") + log.INFO("Converting point cloud (non-georeferenced)") system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) @@ -223,10 +223,10 @@ def powerr(r): try: a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, stats_dir) except Exception as e: - log.ODM_WARNING("Cannot compute alignment matrix: %s" % str(e)) + log.WARNING("Cannot compute alignment matrix: %s" % str(e)) if a_matrix is not None: - log.ODM_INFO("Alignment matrix: %s" % a_matrix) + log.INFO("Alignment matrix: %s" % a_matrix) # Align point cloud if os.path.isfile(unaligned_model): @@ -235,9 +235,9 @@ def powerr(r): try: transform_point_cloud(unaligned_model, a_matrix, tree.odm_georeferencing_model_laz) - log.ODM_INFO("Transformed %s" % tree.odm_georeferencing_model_laz) + log.INFO("Transformed %s" % tree.odm_georeferencing_model_laz) except Exception as e: - log.ODM_WARNING("Cannot transform point cloud: %s" % str(e)) + log.WARNING("Cannot transform point cloud: %s" % str(e)) os.rename(unaligned_model, tree.odm_georeferencing_model_laz) # Align textured models @@ -249,9 +249,9 @@ def transform_textured_model(obj): os.rename(obj, unaligned_obj) try: transform_obj(unaligned_obj, a_matrix, [reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset], obj) - log.ODM_INFO("Transformed %s" % obj) + log.INFO("Transformed %s" % obj) except Exception as e: - log.ODM_WARNING("Cannot transform textured model: %s" % str(e)) + log.WARNING("Cannot 
transform textured model: %s" % str(e)) os.rename(unaligned_obj, obj) for texturing in [tree.odm_texturing, tree.odm_25dtexturing]: @@ -268,15 +268,15 @@ def transform_textured_model(obj): with open(tree.odm_georeferencing_alignment_matrix, "w") as f: f.write(np_to_json(a_matrix)) else: - log.ODM_WARNING("Alignment to %s will be skipped." % tree.odm_align_file) + log.WARNING("Alignment to %s will be skipped." % tree.odm_align_file) else: - log.ODM_WARNING("Already computed alignment") + log.WARNING("Already computed alignment") elif io.file_exists(tree.odm_georeferencing_alignment_matrix): os.unlink(tree.odm_georeferencing_alignment_matrix) point_cloud.post_point_cloud_steps(args, tree, self.rerun()) else: - log.ODM_WARNING('Found a valid georeferenced model in: %s' + log.WARNING('Found a valid georeferenced model in: %s' % tree.odm_georeferencing_model_laz) if args.optimize_disk_space and io.file_exists(tree.odm_georeferencing_model_laz) and io.file_exists(tree.filtered_point_cloud): diff --git a/stages/odm_meshing.py b/stages/odm_meshing.py index 1b0e5c54..b5856dfe 100644 --- a/stages/odm_meshing.py +++ b/stages/odm_meshing.py @@ -20,7 +20,7 @@ def process(self, args, outputs): # Create full 3D model unless --skip-3dmodel is set if not args.skip_3dmodel: if not io.file_exists(tree.odm_mesh) or self.rerun(): - log.ODM_INFO('Writing mesh file in: %s' % tree.odm_mesh) + log.INFO('Writing mesh file in: %s' % tree.odm_mesh) mesh.screened_poisson_reconstruction(tree.filtered_point_cloud, tree.odm_mesh, @@ -30,7 +30,7 @@ def process(self, args, outputs): pointWeight=self.params.get('point_weight'), threads=max(1, self.params.get('max_concurrency') - 1)), # poissonrecon can get stuck on some machines if --threads == all cores else: - log.ODM_WARNING('Found a valid mesh file in: %s' % + log.WARNING('Found a valid mesh file in: %s' % tree.odm_mesh) self.update_progress(50) @@ -40,13 +40,13 @@ def process(self, args, outputs): if not args.use_3dmesh: if not 
io.file_exists(tree.odm_25dmesh) or self.rerun(): - log.ODM_INFO('Writing 2.5D mesh file in: %s' % tree.odm_25dmesh) + log.INFO('Writing 2.5D mesh file in: %s' % tree.odm_25dmesh) multiplier = math.pi / 2.0 radius_steps = commands.get_dem_radius_steps(tree.filtered_point_cloud_stats, 3, args.orthophoto_resolution, multiplier=multiplier) dsm_resolution = radius_steps[0] / multiplier - log.ODM_INFO('2.5D DSM resolution: %s' % dsm_resolution) + log.INFO('2.5D DSM resolution: %s' % dsm_resolution) if args.fast_orthophoto: dsm_resolution *= 8.0 @@ -62,6 +62,6 @@ def process(self, args, outputs): smooth_dsm=True, max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2)) else: - log.ODM_WARNING('Found a valid 2.5D mesh file in: %s' % + log.WARNING('Found a valid 2.5D mesh file in: %s' % tree.odm_25dmesh) diff --git a/stages/odm_orthophoto.py b/stages/odm_orthophoto.py index 0b5bfee2..b44c85a1 100644 --- a/stages/odm_orthophoto.py +++ b/stages/odm_orthophoto.py @@ -24,7 +24,7 @@ def process(self, args, outputs): system.mkdir_p(tree.odm_orthophoto) if args.skip_orthophoto: - log.ODM_WARNING("--skip-orthophoto is set, no orthophoto will be generated") + log.WARNING("--skip-orthophoto is set, no orthophoto will be generated") return if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun(): @@ -103,7 +103,7 @@ def process(self, args, outputs): kwargs['ortho'] = tree.odm_orthophoto_tif # Render directly to final file # run odm_orthophoto - log.ODM_INFO('Creating GeoTIFF') + log.INFO('Creating GeoTIFF') system.run('"{odm_ortho_bin}" -inputFiles {models} ' '-logFile "{log}" -outputFile "{ortho}" -resolution {res} -verbose ' '-outputCornerFile "{corners}" {bands} {depth_idx} {inpaint} ' @@ -130,7 +130,7 @@ def process(self, args, outputs): os.path.join(tree.odm_orthophoto, "odm_orthophoto_cut.tif"), blend_distance=20, only_max_coords_feature=True) else: - log.ODM_INFO("Not a submodel run, skipping mask raster generation") + 
log.INFO("Not a submodel run, skipping mask raster generation") orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution, reconstruction, tree, not outputs["large"]) @@ -145,12 +145,12 @@ def process(self, args, outputs): else: if io.file_exists(tree.odm_orthophoto_render): pseudogeo.add_pseudo_georeferencing(tree.odm_orthophoto_render) - log.ODM_INFO("Renaming %s --> %s" % (tree.odm_orthophoto_render, tree.odm_orthophoto_tif)) + log.INFO("Renaming %s --> %s" % (tree.odm_orthophoto_render, tree.odm_orthophoto_tif)) os.replace(tree.odm_orthophoto_render, tree.odm_orthophoto_tif) else: - log.ODM_WARNING("Could not generate an orthophoto (it did not render)") + log.WARNING("Could not generate an orthophoto (it did not render)") else: - log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_tif) + log.WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_tif) if io.file_exists(tree.odm_orthophoto_render): os.remove(tree.odm_orthophoto_render) diff --git a/stages/odm_postprocess.py b/stages/odm_postprocess.py index 45b27fab..d417b48f 100644 --- a/stages/odm_postprocess.py +++ b/stages/odm_postprocess.py @@ -12,7 +12,7 @@ def process(self, args, outputs): tree = outputs['tree'] reconstruction = outputs['reconstruction'] - log.ODM_INFO("Post Processing") + log.INFO("Post Processing") if args.gltf: textured_model = os.path.join(tree.odm_texturing, tree.odm_textured_model_obj) @@ -28,11 +28,11 @@ def process(self, args, outputs): odm_textured_model_glb = os.path.join(os.path.dirname(input_obj), tree.odm_textured_model_glb) if not os.path.exists(odm_textured_model_glb) or self.rerun(): - log.ODM_INFO("Generating glTF Binary") + log.INFO("Generating glTF Binary") try: - obj2glb(input_obj, odm_textured_model_glb, rtc=reconstruction.get_proj_offset(), _info=log.ODM_INFO) + obj2glb(input_obj, odm_textured_model_glb, rtc=reconstruction.get_proj_offset(), _info=log.INFO) except Exception as e: - 
log.ODM_WARNING(str(e)) + log.WARNING(str(e)) if getattr(args, '3d_tiles'): build_3dtiles(args, tree, reconstruction, self.rerun()) @@ -41,5 +41,5 @@ def process(self, args, outputs): try: copy_paths([os.path.join(args.project_path, p) for p in get_processing_results_paths()], args.copy_to, self.rerun()) except Exception as e: - log.ODM_WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e))) + log.WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e))) diff --git a/stages/odm_report.py b/stages/odm_report.py index 83a6fa86..6302275d 100644 --- a/stages/odm_report.py +++ b/stages/odm_report.py @@ -43,7 +43,7 @@ def process(self, args, outputs): if not os.path.exists(tree.odm_report): system.mkdir_p(tree.odm_report) - log.ODM_INFO("Exporting shots.geojson") + log.INFO("Exporting shots.geojson") shots_geojson = os.path.join(tree.odm_report, "shots.geojson") if not io.file_exists(shots_geojson) or self.rerun(): @@ -54,7 +54,7 @@ def process(self, args, outputs): if io.file_exists(tree.odm_georeferencing_alignment_matrix): with open(tree.odm_georeferencing_alignment_matrix, 'r') as f: a_matrix = np_from_json(f.read()) - log.ODM_INFO("Aligning shots to %s" % a_matrix) + log.INFO("Aligning shots to %s" % a_matrix) shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, utm_srs=reconstruction.get_proj_srs(), utm_offset=reconstruction.georef.utm_offset(), a_matrix=a_matrix) else: @@ -65,27 +65,27 @@ def process(self, args, outputs): with open(shots_geojson, "w") as fout: fout.write(json.dumps(shots)) - log.ODM_INFO("Wrote %s" % shots_geojson) + log.INFO("Wrote %s" % shots_geojson) else: - log.ODM_WARNING("Cannot extract shots") + log.WARNING("Cannot extract shots") else: - log.ODM_WARNING('Found a valid shots file in: %s' % shots_geojson) + log.WARNING('Found a valid shots file in: %s' % shots_geojson) camera_mappings = os.path.join(tree.odm_report, "camera_mappings.npz") if not io.file_exists(camera_mappings) or self.rerun(): src_cm = 
os.path.join(tree.opensfm, "camera_mappings.npz") if io.file_exists(src_cm): shutil.copy(src_cm, camera_mappings) - log.ODM_INFO("Copied %s --> %s" % (src_cm, camera_mappings)) + log.INFO("Copied %s --> %s" % (src_cm, camera_mappings)) else: - log.ODM_WARNING("Cannot copy camera mappings") + log.WARNING("Cannot copy camera mappings") else: - log.ODM_WARNING("Found a valid camera mappings file in: %s" % camera_mappings) + log.WARNING("Found a valid camera mappings file in: %s" % camera_mappings) if args.skip_report: # Stop right here - log.ODM_WARNING("Skipping report generation as requested") + log.WARNING("Skipping report generation as requested") return # Augment OpenSfM stats file with our own stats @@ -119,7 +119,7 @@ def process(self, args, outputs): pc_info_file = os.path.join(tree.odm_filterpoints, "point_cloud.info.json") odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(ply_pc, pc_info_file, self.rerun()) else: - log.ODM_WARNING("No point cloud found") + log.WARNING("No point cloud found") odm_stats['point_cloud_statistics']['dense'] = not args.fast_orthophoto @@ -139,9 +139,9 @@ def process(self, args, outputs): with open(odm_stats_json, 'w') as f: f.write(json.dumps(odm_stats)) else: - log.ODM_WARNING("Cannot generate report, OpenSfM stats are missing") + log.WARNING("Cannot generate report, OpenSfM stats are missing") else: - log.ODM_WARNING("Reading existing stats %s" % odm_stats_json) + log.WARNING("Reading existing stats %s" % odm_stats_json) with open(odm_stats_json, 'r') as f: odm_stats = json.loads(f.read()) @@ -204,7 +204,7 @@ def process(self, args, outputs): resized_dem_file = io.related_file_path(dem_file, postfix=".preview") system.run("gdal_translate -outsize {} 0 \"{}\" \"{}\" --config GDAL_CACHEMAX {}%".format(image_target_size, dem_file, resized_dem_file, get_max_memory())) - log.ODM_INFO("Computing raster stats for %s" % resized_dem_file) + log.INFO("Computing raster stats for %s" % resized_dem_file) dem_stats = 
get_raster_stats(resized_dem_file) if len(dem_stats) > 0: odm_stats[dem + '_statistics'] = dem_stats[0] @@ -216,8 +216,8 @@ def process(self, args, outputs): if os.path.isfile(f): os.remove(f) else: - log.ODM_WARNING("Cannot generate overlap diagram, cannot compute point cloud bounds") + log.WARNING("Cannot generate overlap diagram, cannot compute point cloud bounds") else: - log.ODM_WARNING("Cannot generate overlap diagram, point cloud stats missing") + log.WARNING("Cannot generate overlap diagram, point cloud stats missing") octx.export_report(os.path.join(tree.odm_report, "report.pdf"), odm_stats, self.rerun()) diff --git a/stages/openmvs.py b/stages/openmvs.py index 55e13a1c..e40a66b6 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -36,7 +36,7 @@ def process(self, args, outputs): cmd = 'export_openmvs' octx.run(cmd) else: - log.ODM_WARNING("Found existing %s" % openmvs_scene_file) + log.WARNING("Found existing %s" % openmvs_scene_file) self.update_progress(10) @@ -49,16 +49,16 @@ def process(self, args, outputs): os.mkdir(depthmaps_dir) depthmap_resolution = get_depthmap_resolution(args, photos) - log.ODM_INFO("Depthmap resolution set to: %spx" % depthmap_resolution) + log.INFO("Depthmap resolution set to: %spx" % depthmap_resolution) if outputs["undist_image_max_size"] <= depthmap_resolution: resolution_level = 0 else: resolution_level = int(round(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2))) - log.ODM_INFO("Running dense reconstruction. This might take a while.") + log.INFO("Running dense reconstruction. This might take a while.") - log.ODM_INFO("Estimating depthmaps") + log.INFO("Estimating depthmaps") number_views_fuse = 2 densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini') subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap. 
@@ -109,11 +109,11 @@ def run_densify(): # If the GPU was enabled and the program failed, # try to run it again without GPU if e.errorCode == 1 and use_gpu: - log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.") + log.WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.") gpu_config = ["--cuda-device -2"] run_densify() elif (e.errorCode == 137 or e.errorCode == 143 or e.errorCode == 3221226505) and not pc_tile: - log.ODM_WARNING("OpenMVS ran out of memory, we're going to turn on tiling to see if we can process this.") + log.WARNING("OpenMVS ran out of memory, we're going to turn on tiling to see if we can process this.") pc_tile = True config.append("--fusion-mode 1") run_densify() @@ -125,7 +125,7 @@ def run_densify(): scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs') if pc_tile: - log.ODM_INFO("Computing sub-scenes") + log.INFO("Computing sub-scenes") subscene_densify_ini_file = os.path.join(tree.openmvs, 'subscene-config.ini') with open(subscene_densify_ini_file, 'w+') as f: @@ -145,7 +145,7 @@ def run_densify(): if len(scene_files) == 0: raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.") - log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files)) + log.INFO("Fusing depthmaps for %s scenes" % len(scene_files)) scene_ply_files = [] @@ -177,30 +177,30 @@ def run_densify(): try: system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config + extra_config))) except: - log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf) + log.WARNING("Sub-scene %s could not be reconstructed, skipping..." 
% sf) if not io.file_exists(scene_ply_unfiltered): scene_ply_files.pop() - log.ODM_WARNING("Could not compute PLY for subscene %s" % sf) + log.WARNING("Could not compute PLY for subscene %s" % sf) else: # Filter if args.pc_filter > 0: system.run('"%s" "%s" --filter-point-cloud %s -v 0 --archive-type 3 %s' % (context.omvs_densify_path, scene_dense_mvs, filter_point_th, ' '.join(gpu_config))) else: # Just rename - log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply)) + log.INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply)) os.rename(scene_ply_unfiltered, scene_ply) else: - log.ODM_WARNING("Found existing dense scene file %s" % scene_ply) + log.WARNING("Found existing dense scene file %s" % scene_ply) # Merge - log.ODM_INFO("Merging %s scene files" % len(scene_ply_files)) + log.INFO("Merging %s scene files" % len(scene_ply_files)) if len(scene_ply_files) == 0: raise system.ExitException("Could not compute dense point cloud (no PLY files available).") if len(scene_ply_files) == 1: # Simply rename os.replace(scene_ply_files[0], tree.openmvs_model) - log.ODM_INFO("%s --> %s"% (scene_ply_files[0], tree.openmvs_model)) + log.INFO("%s --> %s"% (scene_ply_files[0], tree.openmvs_model)) else: # Merge fast_merge_ply(scene_ply_files, tree.openmvs_model) @@ -211,7 +211,7 @@ def skip_filtering(): if not os.path.exists(scene_dense_ply): raise system.ExitException("Dense reconstruction failed. 
This could be due to poor georeferencing or insufficient image overlap.") - log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model)) + log.INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model)) os.rename(scene_dense_ply, tree.openmvs_model) # Filter all at once @@ -226,7 +226,7 @@ def skip_filtering(): system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config))) except system.SubprocessException as e: if e.errorCode == 137 or e.errorCode == 143 or e.errorCode == 3221226505: - log.ODM_WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.") + log.WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.") skip_filtering() else: raise e @@ -249,5 +249,5 @@ def skip_filtering(): os.remove(f) shutil.rmtree(depthmaps_dir) else: - log.ODM_WARNING('Found a valid OpenMVS reconstruction file in: %s' % + log.WARNING('Found a valid OpenMVS reconstruction file in: %s' % tree.openmvs_model) diff --git a/stages/run_opensfm.py b/stages/run_opensfm.py index 850c6ce8..7e21adff 100644 --- a/stages/run_opensfm.py +++ b/stages/run_opensfm.py @@ -51,7 +51,7 @@ def cleanup_disk_space(): # If we find a special flag file for split/merge we stop right here if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")): - log.ODM_INFO("Stopping OpenSfM early because we found: %s" % octx.path("split_merge_stop_at_reconstruction.txt")) + log.INFO("Stopping OpenSfM early because we found: %s" % octx.path("split_merge_stop_at_reconstruction.txt")) self.next_stage = None cleanup_disk_space() return @@ -75,7 +75,7 @@ def cleanup_disk_space(): shutil.move(tree.opensfm_reconstruction, tree.opensfm_topocentric_reconstruction) shutil.move(tree.opensfm_geocoords_reconstruction, tree.opensfm_reconstruction) else: - log.ODM_WARNING("Will skip exporting %s" % tree.opensfm_geocoords_reconstruction) + log.WARNING("Will skip exporting %s" % 
tree.opensfm_geocoords_reconstruction) self.update_progress(80) @@ -134,7 +134,7 @@ def align_to_primary_band(shot_id, image): if ainfo is not None: return multispectral.align_image(image, ainfo['warp_matrix'], ainfo['dimension']) else: - log.ODM_WARNING("Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected." % (shot_id)) + log.WARNING("Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected." % (shot_id)) return image if reconstruction.multi_camera: @@ -167,10 +167,10 @@ def align_to_primary_band(shot_id, image): if not args.skip_band_alignment: alignment_info = multispectral.compute_alignment_matrices(reconstruction.multi_camera, primary_band_name, tree.dataset_raw, s2p, p2s, max_concurrency=args.max_concurrency) else: - log.ODM_WARNING("Skipping band alignment") + log.WARNING("Skipping band alignment") alignment_info = {} - log.ODM_INFO("Adding shots to reconstruction") + log.INFO("Adding shots to reconstruction") octx.backup_reconstruction() octx.add_shots_to_reconstruction(p2s) @@ -192,11 +192,11 @@ def align_to_primary_band(shot_id, image): if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun(): octx.run('export_visualsfm --points') else: - log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' % + log.WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' % tree.opensfm_reconstruction_nvm) if reconstruction.multi_camera: - log.ODM_INFO("Multiple bands found") + log.INFO("Multiple bands found") # Write NVM files for the various bands for band in reconstruction.multi_camera: @@ -221,11 +221,11 @@ def align_to_primary_band(shot_id, image): if band_filename is not None: img_map[add_image_format_extension(fname, 'tif')] = add_image_format_extension(band_filename, 'tif') else: - log.ODM_WARNING("Cannot find %s band equivalent for %s" % (band, fname)) + log.WARNING("Cannot find %s band equivalent for %s" % (band, fname)) 
nvm.replace_nvm_images(tree.opensfm_reconstruction_nvm, img_map, nvm_file) else: - log.ODM_WARNING("Found existing NVM file %s" % nvm_file) + log.WARNING("Found existing NVM file %s" % nvm_file) # Skip dense reconstruction if necessary and export # sparse reconstruction instead @@ -235,7 +235,7 @@ def align_to_primary_band(shot_id, image): if not io.file_exists(output_file) or self.rerun(): octx.run('export_ply --no-cameras --point-num-views') else: - log.ODM_WARNING("Found a valid PLY reconstruction in %s" % output_file) + log.WARNING("Found a valid PLY reconstruction in %s" % output_file) cleanup_disk_space() diff --git a/stages/splitmerge.py b/stages/splitmerge.py index d9d2e7d4..52ae98e4 100644 --- a/stages/splitmerge.py +++ b/stages/splitmerge.py @@ -36,7 +36,7 @@ def process(self, args, outputs): if reconstruction.has_geotagged_photos(): outputs['large'] = True else: - log.ODM_WARNING('Could not perform split-merge as GPS information in photos or image_groups.txt is missing.') + log.WARNING('Could not perform split-merge as GPS information in photos or image_groups.txt is missing.') if outputs['large']: # If we have a cluster address, we'll use a distributed workflow @@ -49,9 +49,9 @@ def process(self, args, outputs): orig_max_concurrency = args.max_concurrency if not local_workflow: args.max_concurrency = max(1, args.max_concurrency - 1) - log.ODM_INFO("Setting max-concurrency to %s to better handle remote splits" % args.max_concurrency) + log.INFO("Setting max-concurrency to %s to better handle remote splits" % args.max_concurrency) - log.ODM_INFO("Large dataset detected (%s photos) and split set at %s. Preparing split merge." % (len(photos), args.split)) + log.INFO("Large dataset detected (%s photos) and split set at %s. Preparing split merge." 
% (len(photos), args.split)) multiplier = (1.0 / len(reconstruction.multi_camera)) if reconstruction.multi_camera else 1.0 config = [ @@ -75,12 +75,12 @@ def process(self, args, outputs): # Create submodels if not io.dir_exists(tree.submodels_path) or self.rerun(): if io.dir_exists(tree.submodels_path): - log.ODM_WARNING("Removing existing submodels directory: %s" % tree.submodels_path) + log.WARNING("Removing existing submodels directory: %s" % tree.submodels_path) shutil.rmtree(tree.submodels_path) octx.run("create_submodels") else: - log.ODM_WARNING("Submodels directory already exist at: %s" % tree.submodels_path) + log.WARNING("Submodels directory already exist at: %s" % tree.submodels_path) # Find paths of all submodels mds = metadataset.MetaDataSet(tree.opensfm) @@ -96,16 +96,16 @@ def process(self, args, outputs): submodel_gcp_file = os.path.abspath(sp_octx.path("..", "gcp_list.txt")) if reconstruction.gcp.make_filtered_copy(submodel_gcp_file, submodel_images_dir): - log.ODM_INFO("Copied filtered GCP file to %s" % submodel_gcp_file) + log.INFO("Copied filtered GCP file to %s" % submodel_gcp_file) io.copy(submodel_gcp_file, os.path.abspath(sp_octx.path("gcp_list.txt"))) else: - log.ODM_INFO("No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP" % sp_octx.name()) + log.INFO("No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP" % sp_octx.name()) # Copy GEO file if needed (one for each submodel project directory) if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file): geo_dst_path = os.path.abspath(sp_octx.path("..", "geo.txt")) io.copy(tree.odm_geo_file, geo_dst_path) - log.ODM_INFO("Copied GEO file to %s" % geo_dst_path) + log.INFO("Copied GEO file to %s" % geo_dst_path) # If this is a multispectral dataset, # we need to link the multispectral images @@ -121,12 +121,12 @@ def process(self, args, outputs): system.link_file(os.path.join(tree.dataset_raw, p.filename), 
submodel_images_dir) # Reconstruct each submodel - log.ODM_INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths)) + log.INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths)) self.update_progress(25) if local_workflow: for sp in submodel_paths: - log.ODM_INFO("Reconstructing %s" % sp) + log.INFO("Reconstructing %s" % sp) local_sp_octx = OSFMContext(sp) local_sp_octx.create_tracks(self.rerun()) local_sp_octx.reconstruct(args.rolling_shutter, not args.sfm_no_partial, self.rerun()) @@ -155,11 +155,11 @@ def process(self, args, outputs): main_recon = sp_octx.path('reconstruction.json') if io.file_exists(main_recon) and io.file_exists(unaligned_recon) and not self.rerun(): - log.ODM_INFO("Submodel %s has already been aligned." % sp_octx.name()) + log.INFO("Submodel %s has already been aligned." % sp_octx.name()) continue if not io.file_exists(aligned_recon): - log.ODM_WARNING("Submodel %s does not have an aligned reconstruction (%s). " + log.WARNING("Submodel %s does not have an aligned reconstruction (%s). " "This could mean that the submodel could not be reconstructed " " (are there enough features to reconstruct it?). Skipping." 
% (sp_octx.name(), aligned_recon)) remove_paths.append(sp) @@ -169,7 +169,7 @@ def process(self, args, outputs): shutil.move(main_recon, unaligned_recon) shutil.move(aligned_recon, main_recon) - log.ODM_INFO("%s is now %s" % (aligned_recon, main_recon)) + log.INFO("%s is now %s" % (aligned_recon, main_recon)) # Remove invalid submodels submodel_paths = [p for p in submodel_paths if not p in remove_paths] @@ -179,9 +179,9 @@ def process(self, args, outputs): for sp in submodel_paths: sp_octx = OSFMContext(sp) - log.ODM_INFO("========================") - log.ODM_INFO("Processing %s" % sp_octx.name()) - log.ODM_INFO("========================") + log.INFO("========================") + log.INFO("Processing %s" % sp_octx.name()) + log.INFO("========================") argv = get_submodel_argv(args, tree.submodels_path, sp_octx.name()) @@ -196,9 +196,9 @@ def process(self, args, outputs): octx.touch(split_done_file) else: - log.ODM_WARNING('Found a split done file in: %s' % split_done_file) + log.WARNING('Found a split done file in: %s' % split_done_file) else: - log.ODM_INFO("Normal dataset, will process all at once.") + log.INFO("Normal dataset, will process all at once.") self.progress = 0.0 @@ -221,9 +221,9 @@ def process(self, args, outputs): point_cloud.merge(all_point_clouds, tree.odm_georeferencing_model_laz, rerun=self.rerun()) point_cloud.post_point_cloud_steps(args, tree, self.rerun()) except Exception as e: - log.ODM_WARNING("Could not merge point cloud: %s (skipping)" % str(e)) + log.WARNING("Could not merge point cloud: %s (skipping)" % str(e)) else: - log.ODM_WARNING("Found merged point cloud in %s" % tree.odm_georeferencing_model_laz) + log.WARNING("Found merged point cloud in %s" % tree.odm_georeferencing_model_laz) self.update_progress(25) @@ -232,14 +232,14 @@ def process(self, args, outputs): merged_bounds_file = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg') if not io.file_exists(merged_bounds_file) or self.rerun(): 
all_bounds = get_submodel_paths(tree.submodels_path, 'odm_georeferencing', 'odm_georeferenced_model.bounds.gpkg') - log.ODM_INFO("Merging all crop bounds: %s" % all_bounds) + log.INFO("Merging all crop bounds: %s" % all_bounds) if len(all_bounds) > 0: # Calculate a new crop area # based on the convex hull of all crop areas of all submodels # (without a buffer, otherwise we are double-cropping) Cropper.merge_bounds(all_bounds, merged_bounds_file, 0) else: - log.ODM_WARNING("No bounds found for any submodel.") + log.WARNING("No bounds found for any submodel.") # Merge orthophotos if args.merge in ['all', 'orthophoto']: @@ -253,7 +253,7 @@ def process(self, args, outputs): ) if len(all_orthos_and_ortho_cuts) > 1: - log.ODM_INFO("Found %s submodels with valid orthophotos and cutlines" % len(all_orthos_and_ortho_cuts)) + log.INFO("Found %s submodels with valid orthophotos and cutlines" % len(all_orthos_and_ortho_cuts)) # TODO: histogram matching via rasterio # currently parts have different color tones @@ -267,12 +267,12 @@ def process(self, args, outputs): reconstruction, tree, False) elif len(all_orthos_and_ortho_cuts) == 1: # Simply copy - log.ODM_WARNING("A single orthophoto/cutline pair was found between all submodels.") + log.WARNING("A single orthophoto/cutline pair was found between all submodels.") shutil.copyfile(all_orthos_and_ortho_cuts[0][0], tree.odm_orthophoto_tif) else: - log.ODM_WARNING("No orthophoto/cutline pairs were found in any of the submodels. No orthophoto will be generated.") + log.WARNING("No orthophoto/cutline pairs were found in any of the submodels. 
No orthophoto will be generated.") else: - log.ODM_WARNING("Found merged orthophoto in %s" % tree.odm_orthophoto_tif) + log.WARNING("Found merged orthophoto in %s" % tree.odm_orthophoto_tif) self.update_progress(75) @@ -284,7 +284,7 @@ def merge_dems(dem_filename, human_name): dem_file = tree.path("odm_dem", dem_filename) if not io.file_exists(dem_file) or self.rerun(): all_dems = get_submodel_paths(tree.submodels_path, "odm_dem", dem_filename) - log.ODM_INFO("Merging %ss" % human_name) + log.INFO("Merging %ss" % human_name) # Merge dem_vars = utils.get_dem_vars(args) @@ -301,7 +301,7 @@ def merge_dems(dem_filename, human_name): # Crop if args.crop > 0 or args.boundary: Cropper.crop(merged_bounds_file, dem_file, dem_vars, keep_original=not args.optimize_disk_space) - log.ODM_INFO("Created %s" % dem_file) + log.INFO("Created %s" % dem_file) if args.tiles: generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency, args.dem_resolution) @@ -311,10 +311,10 @@ def merge_dems(dem_filename, human_name): if args.cog: convert_to_cogeo(dem_file, max_workers=args.max_concurrency) else: - log.ODM_WARNING("Cannot merge %s, %s was not created" % (human_name, dem_file)) + log.WARNING("Cannot merge %s, %s was not created" % (human_name, dem_file)) else: - log.ODM_WARNING("Found merged %s in %s" % (human_name, dem_filename)) + log.WARNING("Found merged %s in %s" % (human_name, dem_filename)) if args.merge in ['all', 'dem'] and args.dsm: merge_dems("dsm.tif", "DSM") @@ -331,25 +331,25 @@ def merge_dems(dem_filename, human_name): geojson_shots = tree.path(tree.odm_report, "shots.geojson") if not io.file_exists(geojson_shots) or self.rerun(): geojson_shots_files = get_submodel_paths(tree.submodels_path, "odm_report", "shots.geojson") - log.ODM_INFO("Merging %s shots.geojson files" % len(geojson_shots_files)) + log.INFO("Merging %s shots.geojson files" % len(geojson_shots_files)) merge_geojson_shots(geojson_shots_files, geojson_shots) else: - 
log.ODM_WARNING("Found merged shots.geojson in %s" % tree.odm_report) + log.WARNING("Found merged shots.geojson in %s" % tree.odm_report) # Merge cameras cameras_json = tree.path("cameras.json") if not io.file_exists(cameras_json) or self.rerun(): cameras_json_files = get_submodel_paths(tree.submodels_path, "cameras.json") - log.ODM_INFO("Merging %s cameras.json files" % len(cameras_json_files)) + log.INFO("Merging %s cameras.json files" % len(cameras_json_files)) merge_cameras(cameras_json_files, cameras_json) else: - log.ODM_WARNING("Found merged cameras.json in %s" % tree.root_path) + log.WARNING("Found merged cameras.json in %s" % tree.root_path) # Stop the pipeline short by skipping to the postprocess stage. # Afterwards, we're done. self.next_stage = self.last_stage() else: - log.ODM_INFO("Normal dataset, nothing to merge.") + log.INFO("Normal dataset, nothing to merge.") self.progress = 0.0