From c835ade2e4a62693f287ed1790946591b56b2f58 Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Thu, 24 Oct 2024 14:11:02 +0200
Subject: [PATCH 1/9] first changes to make the docker work with
 lidar_selecter and on GPAO

---
 README.md | 15 ++++++++++++++-
 configs/configs_patchwork.yaml | 21 ++++++++++++++++-----
 exemples/lidar_selecter_example.sh | 15 +++++++++++----
 lidar_selecter.py | 9 ++++-----
 test/test_lidar_selecter.py | 8 ++++++--
 version.py | 2 +-
 6 files changed, 52 insertions(+), 18 deletions(-)

diff --git a/README.md b/README.md
index 92af5f0..22dadd4 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ conda activate patchwork
 ```
 
 ## utilisation
-Le script peut être lancé via :
+Le script d'ajout de points peut être lancé via :
 ```
 python main.py filepath.DONOR_FILE=[chemin fichier donneur] filepath.RECIPIENT_FILE=[chemin fichier receveur] filepath.OUTPUT_FILE=[chemin fichier de sortie] [autres options]
 ```
@@ -41,3 +41,16 @@ DONOR_CLASS_LIST : Défaut [2, 9]. La liste des classes des points du fichier do
 RECIPIENT_CLASS_LIST : Défaut [2, 3, 9, 17]. La liste des classes des points du fichier receveur qui, s'ils sont absents dans une cellule, justifieront de prendre les points du fichier donneur de la même cellule
 TILE_SIZE : Défaut 1000. Taille du côté de l'emprise carrée représentée par les fichiers lidar d'entrée
 PATCH_SIZE : Défaut 1. taille en mètre du côté d'une cellule (doit être un diviseur de TILE_SIZE, soit pour 1000 : 0.25, 0.5, 2, 4, 5, 10, 25...)
+
+Le script de sélection/découpe de fichier lidar peut être lancé via :
+```
+python lidar_selecter.py filepath.DONOR_DIRECTORY=[répertoire_fichiers_donneurs] filepath.RECIPIENT_DIRECTORY=[répertoire_fichiers_receveurs] filepath.SHP_NAME=[nom_shapefile] filepath.SHP_DIRECTORY=[répertoire_shapefile] filepath.CSV_NAME=[nom_fichier_csv] filepath.CSV_DIRECTORY=[répertoire_fichier_csv] filepath.OUTPUT_DIRECTORY=[chemin_de_sortie]
+```
+
+filepath.DONOR_DIRECTORY: The directory containing all the lidar files that could provide points
+filepath.RECIPIENT_DIRECTORY: The directory containing all the lidar files that could receive points
+filepath.SHP_NAME: Le nom du shapefile contenant l'emprise du chantier qui délimite les fichiers lidar qui nous intéressent
+filepath.SHP_DIRECTORY: Le répertoire du fichier shapefile
+filepath.CSV_NAME: Le nom du fichier csv qui lie les différents fichiers donneurs et receveurs
+filepath.CSV_DIRECTORY: Le répertoire du fichier csv
+filepath.OUTPUT_DIRECTORY: le répertoire recevant les fichiers lidar découpés
\ No newline at end of file
diff --git a/configs/configs_patchwork.yaml b/configs/configs_patchwork.yaml
index 93d2443..d7340ac 100644
--- a/configs/configs_patchwork.yaml
+++ b/configs/configs_patchwork.yaml
@@ -19,16 +19,21 @@ defaults:
   - _self_
 
 filepath:
-  SHAPEFILE_PATH: null # shapefile for lidar selecter, to determine the lidar file to select
+  SHP_NAME: null # name of the shapefile for lidar selecter, to determine the lidar file to select
+  SHP_DIRECTORY: null # path to the directory containing the shapefile
   DONOR_DIRECTORY: null # directory containing all potential donor lidar files, for lidar selecter
   RECIPIENT_DIRECTORY: null # directory containing all potential recipient lidar files, for lidar selecter
-  OUTPUT_DIRECTORY_PATH: null # directory containing all potential donor lidar files, for lidar selecter
-  RECIPIENT_FILE: null # path to the file that receives points. If done after lidar selecter, is in a subdirectory of OUTPUT_DIRECTORY_PATH
-  DONOR_FILE: null # path to the file that gives points. If done after lidar selecter, is in a subdirectory of OUTPUT_DIRECTORY_PATH
+  OUTPUT_DIRECTORY: null # directory receiving the selected/cut lidar files, for lidar selecter
+  RECIPIENT_FILE: null # path to the file that receives points. If done after lidar selecter, is in a subdirectory of OUTPUT_DIRECTORY
+  DONOR_FILE: null # path to the file that gives points. If done after lidar selecter, is in a subdirectory of OUTPUT_DIRECTORY
   OUTPUT_FILE: null # path to the (resulting) file with added points.
   INPUT_INDICES_MAP: null # path for the indices map reflecting the changes to the recipient
   OUTPUT_INDICES_MAP: null
-  CSV_PATH: null # path to the csv file that log the lidar files to process with patchwork
+  # INPUT_DIRECTORY: null # directory for input (shapefile)
+  CSV_NAME: null # name of the csv file that logs the lidar files to process with patchwork
+  CSV_DIRECTORY: null # path to the directory that will contain the csv
+
+
 CRS: 2154
@@ -43,3 +48,9 @@ NEW_COLUMN_SIZE: 8 # must be 8, 16, 32 or 64
 VALUE_ADDED_POINTS: 1 # in case of a new column, value of the new point (the other are set to 0)
 VIRTUAL_CLASS_TRANSLATION: {2: 69, 22: 70} # if there is no new column, translate the class of DONOR_CLASS_LIST into those values
 # each value of DONOR_CLASS_LIST must be a key in VIRTUAL_CLASS_TRANSLATION. Not used if NEW_COLUMN is not None (or "")
+
+gpao:
+  LOCAL_STORE: ""
+  WIN_STORE: "//store.ign.fr/store-lidarhd"
+  UNIX_STORE: "/var/data/store-lidarhd"
+  URL_GPAO: lhd-dev-gpao
\ No newline at end of file
diff --git a/exemples/lidar_selecter_example.sh b/exemples/lidar_selecter_example.sh
index 253b3c8..1f0892b 100644
--- a/exemples/lidar_selecter_example.sh
+++ b/exemples/lidar_selecter_example.sh
@@ -1,11 +1,18 @@
 # for selecting, cutting and dispatching lidar files for patchwork
 python lidar_selecter.py \
-filepath.SHAPEFILE_PATH=[path_to_shapfile] \
 filepath.DONOR_DIRECTORY=[path_to_directory_with_donor_files] \
 filepath.RECIPIENT_DIRECTORY=[path_to_directory_with_recipient_files] \
-filepath.OUTPUT_DIRECTORY_PATH=[output_directory_path]
+filepath.SHP_NAME=[shapefile_name] \
+filepath.SHP_DIRECTORY=[path_to_shapefile_directory] \
+filepath.CSV_NAME=[csv_file_name] \
+filepath.CSV_DIRECTORY=[path_to_csv_directory] \
+filepath.OUTPUT_DIRECTORY=[output_directory_path]
 
-# filepath.SHAPEFILE_PATH: the shapefile that contains the geometry we want to work on
 # filepath.DONOR_DIRECTORY: The directory containing all the lidar files that could provide points
 # filepath.RECIPIENT_DIRECTORY: The directory containing all the lidar files that could receive points
-# filepath.OUTPUT_DIRECTORY_PATH: the directory to put all the selected/cut lidar files
\ No newline at end of file
+# filepath.SHP_NAME: the name of the shapefile defining the area used to select the lidar files
+# filepath.SHP_DIRECTORY: the directory of the shapefile
+# filepath.CSV_NAME: the name of the csv file in which we link donor and recipient files
+# filepath.CSV_DIRECTORY: the directory of the csv file
+# filepath.OUTPUT_DIRECTORY: the directory to put all the cut lidar files
+
diff --git a/lidar_selecter.py b/lidar_selecter.py
index 2077488..925b634 100644
--- a/lidar_selecter.py
+++ b/lidar_selecter.py
@@ -28,7 +28,7 @@ def patchwork_dispatcher(config: DictConfig):
     # preparing donor files:
     select_lidar(config,
                  config.filepath.DONOR_DIRECTORY,
-                 config.filepath.OUTPUT_DIRECTORY_PATH,
+                 config.filepath.OUTPUT_DIRECTORY,
                  c.DONOR_SUBDIRECTORY_NAME,
                  df_result,
                  c.DONOR_FILE_KEY,
@@ -37,14 +37,13 @@ def patchwork_dispatcher(config: DictConfig):
     # preparing recipient files:
     select_lidar(config,
                  config.filepath.RECIPIENT_DIRECTORY,
-                 config.filepath.OUTPUT_DIRECTORY_PATH,
+                 config.filepath.OUTPUT_DIRECTORY,
                  c.RECIPIENT_SUBDIRECTORY_NAME,
                  df_result,
                  c.RECIPIENT_FILE_KEY,
                  False,
                  )
-
-    df_result.to_csv(config.filepath.CSV_PATH, index=False)
+    df_result.to_csv(os.path.join(config.filepath.CSV_DIRECTORY, config.filepath.CSV_NAME), index=False)
 
 
 def cut_lidar(las_points: ScaleAwarePointRecord, shapefile_geometry: MultiPolygon) -> ScaleAwarePointRecord:
@@ -79,7 +78,7 @@ def select_lidar(config: DictConfig,
     Finally, df_result is updated with the path for each file
     """
-    worksite = gpd.GeoDataFrame.from_file(config.filepath.SHAPEFILE_PATH)
+    worksite = gpd.GeoDataFrame.from_file(os.path.join(config.filepath.SHP_DIRECTORY, config.filepath.SHP_NAME))
     shapefile_geometry = worksite.dissolve().geometry.item()
 
     time_old = timeit.default_timer()
diff --git a/test/test_lidar_selecter.py b/test/test_lidar_selecter.py
index 5478153..7538e88 100644
--- a/test/test_lidar_selecter.py
+++ b/test/test_lidar_selecter.py
@@ -46,7 +46,10 @@ def test_cut_lidar():
 
 def test_select_lidar(tmp_path_factory):
     # shapefile creation
-    shapefile_path = tmp_path_factory.mktemp("shapefile") / "shapefile.shp"
+    shp_dir = tmp_path_factory.mktemp("shapefile")
+    shp_name = "shapefile.shp"
+    shapefile_path = shp_dir / shp_name
+
     shapefile_geometry = MultiPolygon([([SHAPE_CORNER_1, SHAPE_CORNER_2, SHAPE_CORNER_3],),])
     gpd_shapefile_geometry = gpd.GeoDataFrame({'geometry': [shapefile_geometry]}, crs=CRS)
     gpd_shapefile_geometry.to_file(shapefile_path)
@@ -80,7 +83,8 @@ def test_select_lidar(tmp_path_factory):
         config = compose(
             config_name="configs_patchwork.yaml",
             overrides=[
-                f"filepath.SHAPEFILE_PATH={shapefile_path}",
+                f"filepath.SHP_DIRECTORY={shp_dir}",
+                f"filepath.SHP_NAME={shp_name}",
                 f"TILE_SIZE={TILE_SIZE}"
             ]
         )
diff --git a/version.py b/version.py
index 0afd4be..b544b70 100644
--- a/version.py
+++ b/version.py
@@ -1,4 +1,4 @@
-__version__ = "1.0.0"
+__version__ = "1.1.0"
 
 if __name__ == "__main__":
     print(__version__)

From 218902660ae24440bd6fa3f0773500e0e7ffc1af Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Thu, 24 Oct 2024 14:36:56 +0200
Subject: [PATCH 2/9] fix mixed French/English, add gpao_builder.py file

---
 README.md | 4 +--
 gpao_builder.py | 88 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 2 deletions(-)
 create mode 100644 gpao_builder.py

diff --git a/README.md b/README.md
index 22dadd4..75b33f1 100644
--- a/README.md
+++ b/README.md
@@ -47,8 +47,8 @@ Le script de sélection/découpe de fichier lidar peut être lancé via :
 python lidar_selecter.py filepath.DONOR_DIRECTORY=[répertoire_fichiers_donneurs] filepath.RECIPIENT_DIRECTORY=[répertoire_fichiers_receveurs] filepath.SHP_NAME=[nom_shapefile] filepath.SHP_DIRECTORY=[répertoire_shapefile] filepath.CSV_NAME=[nom_fichier_csv] filepath.CSV_DIRECTORY=[répertoire_fichier_csv] filepath.OUTPUT_DIRECTORY=[chemin_de_sortie]
 ```
 
-filepath.DONOR_DIRECTORY: The directory containing all the lidar files that could provide points
-filepath.RECIPIENT_DIRECTORY: The directory containing all the lidar files that could receive points
+filepath.DONOR_DIRECTORY: Le répertoire contenant les fichiers lidar donneurs
+filepath.RECIPIENT_DIRECTORY: Le répertoire contenant les fichiers lidar receveurs
 filepath.SHP_NAME: Le nom du shapefile contenant l'emprise du chantier qui délimite les fichiers lidar qui nous intéressent
 filepath.SHP_DIRECTORY: Le répertoire du fichier shapefile
 filepath.CSV_NAME: Le nom du fichier csv qui lie les différents fichiers donneurs et receveurs
 filepath.CSV_DIRECTORY: Le répertoire du fichier csv
diff --git a/gpao_builder.py b/gpao_builder.py
new file mode 100644
index 0000000..7e955e2
--- /dev/null
+++ b/gpao_builder.py
@@ -0,0 +1,88 @@
+import json
+import os
+import requests
+
+import hydra
+from omegaconf import DictConfig
+from gpao.builder import Builder, Project
+from gpao_utils.store import Store
+from gpao.job import Job
+
+from patchwork import patchwork
+from version import __version__
+
+docker_run = "docker run --userns=host --rm "
+
+TAGS = ["docker"]
+LOCAL = "/home/MDaab/code/patchwork"
+
+
+def build_url_api(hostname: str):
+    return f"http://{hostname}:8080/api/"
+
+
+def send_project(url_api: str, filename: str):
+    """send a gpao project"""
+    headers = {
+        "Content-type": "application/json",
+    }
+    with open(filename, "rb") as data:
+        response = requests.put(url_api + "project", headers=headers, data=data)
+    return response
+
+
+@hydra.main(config_path="configs/", config_name="configs_patchwork.yaml", version_base="1.2")
+def run_gpao(config: DictConfig):
+    # patchwork(config)
+    _project_json = LOCAL
+    url_gpao = config.gpao.URL_GPAO
+    job_lidar_selecter = [lidar_selecter_job(config)]
+    projet_list = [Project("lidar selecter", job_lidar_selecter)]
+
+    builder = Builder(projet_list)
+    builder.save_as_json(_project_json)
+
+    url_api = url_gpao
+    if not url_api.lower().startswith("http"):
+        url_api = build_url_api(url_gpao)
+    response = send_project(url_api, _project_json)
+
+    if response.status_code != 200:
+        print("erreur de requête : ", response.status_code)
+        assert response.status_code == 200
+    print("Projet GPAO mis en base (" + url_api + ")")
+
+
+def lidar_selecter_job(config: DictConfig):
+    store_lidarhd = Store(config.gpao.LOCAL_STORE, config.gpao.WIN_STORE, config.gpao.UNIX_STORE)
+    job_name = "Sélection/découpe des fichiers lidar"
+    donor_dir = config.filepath.DONOR_DIRECTORY
+    recipient_dir = config.filepath.RECIPIENT_DIRECTORY
+
+    shp_name = config.filepath.SHP_NAME
+    shp_dir = config.filepath.SHP_DIRECTORY
+    csv_name = config.filepath.CSV_NAME
+    csv_dir = config.filepath.CSV_DIRECTORY
+
+    version_patchwork = __version__
+
+    command = f"{docker_run} " + \
+        f"-v {store_lidarhd.to_unix(donor_dir)}:/donor_dir " + \
+        f"-v {store_lidarhd.to_unix(recipient_dir)}:/recipient_dir " + \
+        f"-v {store_lidarhd.to_unix(shp_dir)}:/shp_dir " + \
+        f"-v {store_lidarhd.to_unix(csv_dir)}:/csv_dir " + \
+        f"patchwork:v{version_patchwork} " + \
+        "python lidar_selecter.py " + \
+        "filepath.DONOR_DIRECTORY=/donor_dir " + \
+        "filepath.RECIPIENT_DIRECTORY=/recipient_dir " + \
+        "filepath.SHP_DIRECTORY=/shp_dir " + \
+        f"filepath.SHP_NAME={shp_name} " + \
+        "filepath.CSV_DIRECTORY=/csv_dir " + \
+        f"filepath.CSV_NAME={csv_name} " + \
+        "filepath.OUTPUT_DIRECTORY=/output_dir "
+
+
+    return Job(job_name, command, tags=TAGS)
+
+
+if __name__ == "__main__":
+    run_gpao()
\ No newline at end of file

From fffeea45d40f1fb28004fadfb4962a300b9c9907 Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Thu, 24 Oct 2024 15:53:04 +0200
Subject: [PATCH 3/9] remove gpao links

---
 configs/configs_patchwork.yaml | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/configs/configs_patchwork.yaml b/configs/configs_patchwork.yaml
index d7340ac..efa7e66 100644
--- a/configs/configs_patchwork.yaml
+++ b/configs/configs_patchwork.yaml
@@ -48,9 +48,3 @@ NEW_COLUMN_SIZE: 8 # must be 8, 16, 32 or 64
 VALUE_ADDED_POINTS: 1 # in case of a new column, value of the new point (the other are set to 0)
 VIRTUAL_CLASS_TRANSLATION: {2: 69, 22: 70} # if there is no new column, translate the class of DONOR_CLASS_LIST into those values
 # each value of DONOR_CLASS_LIST must be a key in VIRTUAL_CLASS_TRANSLATION. Not used if NEW_COLUMN is not None (or "")
-
-gpao:
-  LOCAL_STORE: ""
-  WIN_STORE: "//store.ign.fr/store-lidarhd"
-  UNIX_STORE: "/var/data/store-lidarhd"
-  URL_GPAO: lhd-dev-gpao
\ No newline at end of file

From 3629daee47e6f02ca0fab49c055afccbb9ba88fa Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Fri, 25 Oct 2024 10:56:51 +0200
Subject: [PATCH 4/9] Patchwork now checks if there is a csv file. If there
 is, get the donor file matching the recipient file from that csv file

---
 CHANGELOG.md | 4 ++
 gpao_builder.py | 88 ------
 patchwork.py | 23 +++++++++++
 test/test_patchwork.py | 18 ++++++++++++++++--
 4 files changed, 44 insertions(+), 89 deletions(-)
 delete mode 100644 gpao_builder.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a4df0b7..23eb5cf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 # CHANGELOG
 
+## 1.1.0
+- modification de chemin pour pouvoir passer dans la gpao
+- patchwork vérifie maintenant s'il y a un ficheir csv en entrée. Si c'est le cas, le fichier donneur utilisé est celui qui correspond au fichier receveur dans le ficheir csv. S'il n'y a pas de fichier donneur correspondant, patchwork termine sans rien faire
+
 ## 1.0.0
 version initiale :
 - découpe et sélection des fichiers lidar
diff --git a/gpao_builder.py b/gpao_builder.py
deleted file mode 100644
index 7e955e2..0000000
--- a/gpao_builder.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import json
-import os
-import requests
-
-import hydra
-from omegaconf import DictConfig
-from gpao.builder import Builder, Project
-from gpao_utils.store import Store
-from gpao.job import Job
-
-from patchwork import patchwork
-from version import __version__
-
-docker_run = "docker run --userns=host --rm "
-
-TAGS = ["docker"]
-LOCAL = "/home/MDaab/code/patchwork"
-
-
-def build_url_api(hostname: str):
-    return f"http://{hostname}:8080/api/"
-
-
-def send_project(url_api: str, filename: str):
-    """send a gpao project"""
-    headers = {
-        "Content-type": "application/json",
-    }
-    with open(filename, "rb") as data:
-        response = requests.put(url_api + "project", headers=headers, data=data)
-    return response
-
-
-@hydra.main(config_path="configs/", config_name="configs_patchwork.yaml", version_base="1.2")
-def run_gpao(config: DictConfig):
-    # patchwork(config)
-    _project_json = LOCAL
-    url_gpao = config.gpao.URL_GPAO
-    job_lidar_selecter = [lidar_selecter_job(config)]
-    projet_list = [Project("lidar selecter", job_lidar_selecter)]
-
-    builder = Builder(projet_list)
-    builder.save_as_json(_project_json)
-
-    url_api = url_gpao
-    if not url_api.lower().startswith("http"):
-        url_api = build_url_api(url_gpao)
-    response = send_project(url_api, _project_json)
-
-    if response.status_code != 200:
-        print("erreur de requête : ", response.status_code)
-        assert response.status_code == 200
-    print("Projet GPAO mis en base (" + url_api + ")")
-
-
-def lidar_selecter_job(config: DictConfig):
-    store_lidarhd = Store(config.gpao.LOCAL_STORE, config.gpao.WIN_STORE, config.gpao.UNIX_STORE)
-    job_name = "Sélection/découpe des fichiers lidar"
-    donor_dir = config.filepath.DONOR_DIRECTORY
-    recipient_dir = config.filepath.RECIPIENT_DIRECTORY
-
-    shp_name = config.filepath.SHP_NAME
-    shp_dir = config.filepath.SHP_DIRECTORY
-    csv_name = config.filepath.CSV_NAME
-    csv_dir = config.filepath.CSV_DIRECTORY
-
-    version_patchwork = __version__
-
-    command = f"{docker_run} " + \
-        f"-v {store_lidarhd.to_unix(donor_dir)}:/donor_dir " + \
-        f"-v {store_lidarhd.to_unix(recipient_dir)}:/recipient_dir " + \
-        f"-v {store_lidarhd.to_unix(shp_dir)}:/shp_dir " + \
-        f"-v {store_lidarhd.to_unix(csv_dir)}:/csv_dir " + \
-        f"patchwork:v{version_patchwork} " + \
-        "python lidar_selecter.py " + \
-        "filepath.DONOR_DIRECTORY=/donor_dir " + \
-        "filepath.RECIPIENT_DIRECTORY=/recipient_dir " + \
-        "filepath.SHP_DIRECTORY=/shp_dir " + \
-        f"filepath.SHP_NAME={shp_name} " + \
-        "filepath.CSV_DIRECTORY=/csv_dir " + \
-        f"filepath.CSV_NAME={csv_name} " + \
-        "filepath.OUTPUT_DIRECTORY=/output_dir "
-
-
-    return Job(job_name, command, tags=TAGS)
-
-
-if __name__ == "__main__":
-    run_gpao()
\ No newline at end of file
diff --git a/patchwork.py b/patchwork.py
index 5bffba1..68cec3a 100644
--- a/patchwork.py
+++ b/patchwork.py
@@ -1,6 +1,7 @@
 from shutil import copy2
 from typing import List, Tuple
+import os
 
 from omegaconf import DictConfig
 
@@ -9,6 +10,7 @@ import laspy
 from laspy import ScaleAwarePointRecord, LasReader
 
+import constants as c
 from tools import get_tile_origin_from_pointcloud, crop_tile
 from indices_map import create_indices_map
 from constants import CLASSIFICATION_STR, PATCH_X_STR, PATCH_Y_STR
@@ -183,7 +185,28 @@ def append_points(config: DictConfig, extra_points: pd.DataFrame):
     output_las.append_points(new_points)
 
 
+def get_donor_from_csv(recipient_file_path: str, csv_file_path: str) -> str:
+    """
+    check if there is a donor file, in the csv file, matching the recipient file
+    return the path to that file if it exists
+    return "" otherwise
+    """
+    df_csv_data = pd.read_csv(csv_file_path)
+    donor_file_paths = df_csv_data.loc[df_csv_data[c.RECIPIENT_FILE_KEY] == recipient_file_path, c.DONOR_FILE_KEY]
+    if len(donor_file_paths) > 0:
+        return donor_file_paths.iloc[0]  # there should be only one donor file for a given recipient file
+    return ""
+
+
 def patchwork(config: DictConfig):
+
+    # if there is a csv_file, we don't use the DONOR_FILE defined by the config but
+    # the file matching the recipient file from the csv file
+    if config.filepath.CSV_DIRECTORY and config.filepath.CSV_NAME:
+        csv_file_path = os.path.join(config.filepath.CSV_DIRECTORY, config.filepath.CSV_NAME)
+        config.filepath.DONOR_FILE = get_donor_from_csv(config.filepath.RECIPIENT_FILE, csv_file_path)
+        if not config.filepath.DONOR_FILE:  # if there is no matching donor file, we do nothing
+            return
+
     complementary_bd_points = get_complementary_points(config)
     append_points(config, complementary_bd_points)
     create_indices_map(config, complementary_bd_points)
diff --git a/test/test_patchwork.py b/test/test_patchwork.py
index 8af6da0..e1e8300 100644
--- a/test/test_patchwork.py
+++ b/test/test_patchwork.py
@@ -5,11 +5,13 @@ import laspy
 import numpy as np
 import pandas as pd
+from pandas import DataFrame
 
 sys.path.append('../patchwork')
 
+import constants as c
 from patchwork import get_complementary_points, get_field_from_header, get_selected_classes_points
-from patchwork import get_type, append_points
+from patchwork import get_type, append_points, get_donor_from_csv
 from tools import get_tile_origin_from_pointcloud
 from constants import CLASSIFICATION_STR
 
@@ -30,6 +32,9 @@
 RECIPIENT_SLIDED_TEST_PATH = "test/data/recipient_slided_test.laz"
 
+COORDINATES = "1234_6789"
+
+
 def test_get_field_from_header():
     with laspy.open(RECIPIENT_TEST_PATH) as recipient_file:
         recipient_fields_list = get_field_from_header(recipient_file)
         assert len(recipient_fields_list) == 18
         # check if all fields are lower case
@@ -233,3 +238,14 @@ def test_append_points_new_column(tmp_path_factory):
     assert new_column[-1] == VALUE_ADDED_POINTS
     assert new_column[-2] == VALUE_ADDED_POINTS
     assert max(new_column[:-2]) == 0
+
+def test_get_donor_from_csv(tmp_path_factory):
+    csv_file_path = tmp_path_factory.mktemp("csv") / "recipients_donors_links.csv"
+    data = {c.COORDINATES_KEY: [COORDINATES, ],
+            c.DONOR_FILE_KEY: [DONOR_MORE_FIELDS_TEST_PATH, ],
+            c.RECIPIENT_FILE_KEY: [RECIPIENT_MORE_FIELDS_TEST_PATH, ]
+            }
+    DataFrame(data=data).to_csv(csv_file_path)
+
+    donor_file_path = get_donor_from_csv(RECIPIENT_MORE_FIELDS_TEST_PATH, csv_file_path)
+    assert donor_file_path == DONOR_MORE_FIELDS_TEST_PATH

From 18da04fbc5c6d8d1f556f22245483fbd96baa112 Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Tue, 29 Oct 2024 15:08:18 +0100
Subject: [PATCH 5/9] cut file path into file_name and directory_path better
 use of the csv file to get a donor file

---
 CHANGELOG.md | 5 +-
 README.md | 14 ++-
 configs/configs_patchwork.yaml | 19 +++-
 exemples/patchwork_example.sh | 24 ++--
 indices_map.py | 7 +-
 patchwork.py | 44 +++++---
 test/test_indices_map.py | 19 +++-
 test/test_patchwork.py | 196 +++++++++++++++++++++++++--------
 version.py | 2 +-
 9 files changed, 243 insertions(+), 87 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 23eb5cf..3235836 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,11 @@
 # CHANGELOG
 
+## 1.2.0
+- coupure des chemins de fichiers en chemins de répertoires/nom de fichiers pour pouvoir les utiliser sur docker + store
+- meilleure vérification du fichier donneur via csv
+
 ## 1.1.0
 - modification de chemin pour pouvoir passer dans la gpao
-- patchwork vérifie maintenant s'il y a un ficheir csv en entrée. Si c'est le cas, le fichier donneur utilisé est celui qui correspond au fichier receveur dans le ficheir csv. S'il n'y a pas de fichier donneur correspondant, patchwork termine sans rien faire
+- patchwork vérifie maintenant s'il y a un fichier csv en entrée. Si c'est le cas, le fichier donneur utilisé est celui qui correspond au fichier receveur dans le fichier csv. S'il n'y a pas de fichier donneur correspondant, patchwork termine sans rien faire
 
 ## 1.0.0
 version initiale :
 - découpe et sélection des fichiers lidar
diff --git a/README.md b/README.md
index 75b33f1..7f9ab2f 100644
--- a/README.md
+++ b/README.md
@@ -32,11 +32,15 @@ python main.py filepath.DONOR_FILE=[chemin fichier donneur] filepath.RECIPIENT_F
 ```
 Les différentes options, modifiables soit dans le fichier configs/configs_patchwork.yaml, soit en ligne de commande comme indiqué juste au-dessus :
 
-filepath.DONOR_FILE : Le chemin du fichier qui peut donner des points à ajouter
-filepath.RECIPIENT_FILE : Le chemin du fichier qui va obtenir des points en plus
-filepath.OUTPUT_FILE : Le chemin du fichier en sortie
-filepath.OUTPUT_INDICES_MAP : Le chemin de sortie du fichier d'indice
-filepath.INPUT_INDICES_MAP : Le chemin vers le fichier d'indice en entrée, si on en a un. Autrement, à laisser à "null"
+filepath.DONOR_DIRECTORY : Le répertoire du fichier qui peut donner des points à ajouter
+filepath.DONOR_NAME : Le nom du fichier qui peut donner des points à ajouter
+filepath.RECIPIENT_DIRECTORY : Le répertoire du fichier qui va obtenir des points en plus
+filepath.RECIPIENT_NAME : Le nom du fichier qui va obtenir des points en plus
+filepath.OUTPUT_DIR : Le répertoire du fichier en sortie
+filepath.OUTPUT_NAME : Le nom du fichier en sortie
+filepath.OUTPUT_INDICES_MAP_DIR : Le répertoire de sortie du fichier d'indice
+filepath.OUTPUT_INDICES_MAP_NAME : Le nom de sortie du fichier d'indice
+
 DONOR_CLASS_LIST : Défaut [2, 9]. La liste des classes des points du fichier donneur qui peuvent être ajoutés.
 RECIPIENT_CLASS_LIST : Défaut [2, 3, 9, 17]. La liste des classes des points du fichier receveur qui, s'ils sont absents dans une cellule, justifieront de prendre les points du fichier donneur de la même cellule
 TILE_SIZE : Défaut 1000. Taille du côté de l'emprise carrée représentée par les fichiers lidar d'entrée
diff --git a/configs/configs_patchwork.yaml b/configs/configs_patchwork.yaml
index efa7e66..c377052 100644
--- a/configs/configs_patchwork.yaml
+++ b/configs/configs_patchwork.yaml
@@ -24,14 +24,23 @@ filepath:
   DONOR_DIRECTORY: null # directory containing all potential donor lidar files, for lidar selecter
   RECIPIENT_DIRECTORY: null # directory containing all potential recipient lidar files, for lidar selecter
   OUTPUT_DIRECTORY: null # directory receiving the selected/cut lidar files, for lidar selecter
-  RECIPIENT_FILE: null # path to the file that receives points. If done after lidar selecter, is in a subdirectory of OUTPUT_DIRECTORY
-  DONOR_FILE: null # path to the file that gives points. If done after lidar selecter, is in a subdirectory of OUTPUT_DIRECTORY
-  OUTPUT_FILE: null # path to the (resulting) file with added points.
-  INPUT_INDICES_MAP: null # path for the indices map reflecting the changes to the recipient
-  OUTPUT_INDICES_MAP: null
+
+  # OUTPUT_FILE: null # path to the (resulting) file with added points.
+  OUTPUT_DIR: null # directory of the file with added points, from patchwork.
+  OUTPUT_NAME: null # name of the file with added points, from patchwork.
+
+  INPUT_INDICES_MAP_DIR: null
+  INPUT_INDICES_MAP_NAME: null
+
+  OUTPUT_INDICES_MAP_DIR: null # path to the directory for the indices map reflecting the changes to the recipient, from patchwork
+  OUTPUT_INDICES_MAP_NAME: null # name of the indices map reflecting the changes to the recipient, from patchwork
 
   # INPUT_DIRECTORY: null # directory for input (shapefile)
   CSV_NAME: null # name of the csv file that logs the lidar files to process with patchwork
   CSV_DIRECTORY: null # path to the directory that will contain the csv
+
+  DONOR_NAME: null # name of the donor file for patchwork
+  RECIPIENT_NAME: null # name of the recipient file for patchwork
diff --git a/exemples/patchwork_example.sh b/exemples/patchwork_example.sh
index d9c0410..00fa870 100644
--- a/exemples/patchwork_example.sh
+++ b/exemples/patchwork_example.sh
@@ -1,12 +1,20 @@
 # for adding points from a donor lidar file to a recipient lidar file with patchwork
 
 python main.py \
-filepath.DONOR_FILE=[donor_file_path]
-filepath.RECIPIENT_FILE=[recipient_file_path]
-filepath.OUTPUT_FILE=[output_file_path]
-filepath.OUTPUT_INDICES_MAP=[output_indices_map_path]
+filepath.DONOR_DIRECTORY=[donor_file_dir]
+filepath.DONOR_NAME=[donor_file_name]
+filepath.RECIPIENT_DIRECTORY=[recipient_file_dir]
+filepath.RECIPIENT_NAME=[recipient_file_name]
+filepath.OUTPUT_DIR=[output_file_dir]
+filepath.OUTPUT_NAME=[output_file_name]
+filepath.OUTPUT_INDICES_MAP_DIR=[output_indices_map_dir]
+filepath.OUTPUT_INDICES_MAP_NAME=[output_indices_map_name]
 
-# filepath.DONOR_FILE: the path to the lidar file we will add points from
-# filepath.RECIPIENT_FILE: the path to the lidar file we will add points to
-# filepath.OUTPUT_FILE: the path to the resulting lidar file
-# filepath.OUTPUT_INDICES_MAP: the path to the map with indices displaying where points have been added
\ No newline at end of file
+# filepath.DONOR_DIRECTORY: the directory of the lidar file we will add points from
+# filepath.DONOR_NAME: the name of the lidar file we will add points from
+# filepath.RECIPIENT_DIRECTORY: the directory of the lidar file we will add points to
+# filepath.RECIPIENT_NAME: the name of the lidar file we will add points to
+# filepath.OUTPUT_DIR: the directory of the resulting lidar file
+# filepath.OUTPUT_NAME: the name of the resulting lidar file
+# filepath.OUTPUT_INDICES_MAP_DIR: the directory of the map with indices displaying where points have been added
+# filepath.OUTPUT_INDICES_MAP_NAME: the name of the map with indices displaying where points have been added
\ No newline at end of file
diff --git a/indices_map.py b/indices_map.py
index 136fd76..214b83d 100644
--- a/indices_map.py
+++ b/indices_map.py
@@ -1,3 +1,5 @@
+import os
+
 import numpy as np
 from omegaconf import DictConfig
 import rasterio as rs
@@ -38,9 +40,10 @@ def create_indices_map(config: DictConfig, df_points: DataFrame):
     corner_x, corner_y = get_tile_origin_from_pointcloud(config, df_points)
     grid = create_indices_grid(config, df_points)
 
+    output_indices_map_path = os.path.join(config.filepath.OUTPUT_INDICES_MAP_DIR, config.filepath.OUTPUT_INDICES_MAP_NAME)
     transform = from_origin(corner_x, corner_y, config.PATCH_SIZE, config.PATCH_SIZE)
-    indices_map = rs.open(config.filepath.OUTPUT_INDICES_MAP, 'w', driver='GTiff',
+    indices_map = rs.open(output_indices_map_path, 'w', driver='GTiff',
                           height=grid.shape[0], width=grid.shape[1],
                           count=1, dtype=str(grid.dtype),
                           crs=config.CRS,
@@ -50,7 +53,7 @@ def read_indices_map(config: DictConfig):
-    indices_map = rs.open(config.filepath.INPUT_INDICES_MAP)
+    indices_map = rs.open(os.path.join(config.filepath.INPUT_INDICES_MAP_DIR, config.filepath.INPUT_INDICES_MAP_NAME))
     transformer = indices_map.get_transform()
     grid = indices_map.read()
     grid = grid[0]
diff --git a/patchwork.py b/patchwork.py
index 68cec3a..5d57bcb 100644
--- a/patchwork.py
+++ b/patchwork.py
@@ -2,6 +2,7 @@
 from shutil import copy2
 from typing import List, Tuple
 import os
+from pathlib import Path
 
 from omegaconf import DictConfig
 
@@ -69,8 +70,12 @@ def get_type(new_column_size: int):
 
 def get_complementary_points(config: DictConfig) -> pd.DataFrame:
-    with laspy.open(config.filepath.DONOR_FILE) as donor_file, \
-            laspy.open(config.filepath.RECIPIENT_FILE) as recipient_file:
+    donor_dir, donor_name = get_donor_path(config)
+    donor_file_path = os.path.join(donor_dir, donor_name)
+    recipient_file_path = os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME)
+
+    with laspy.open(donor_file_path) as donor_file, \
+            laspy.open(recipient_file_path) as recipient_file:
         raw_donor_points = donor_file.read().points
         donor_points = crop_tile(config, raw_donor_points)
         raw_recipient_points = recipient_file.read().points
@@ -80,8 +85,8 @@ def get_complementary_points(config: DictConfig) -> pd.DataFrame:
     tile_origin_donor = get_tile_origin_from_pointcloud(config, donor_points)
     tile_origin_recipient = get_tile_origin_from_pointcloud(config, recipient_points)
     if tile_origin_donor != tile_origin_recipient:
-        raise ValueError(f"{config.filepath.DONOR_FILE} and \
-                         {config.filepath.RECIPIENT_FILE} are not on the same area")
+        raise ValueError(f"{donor_file_path} and \
+                         {recipient_file_path} are not on the same area")
 
     donor_columns = get_field_from_header(donor_file)
     df_donor_points = get_selected_classes_points(config,
@@ -131,8 +136,8 @@ def test_field_exists(file_path: str, colmun: str) -> bool:
 
 def append_points(config: DictConfig, extra_points: pd.DataFrame):
     # get field to copy :
-    recipient_filepath = config.filepath.RECIPIENT_FILE
-    ouput_filepath = config.filepath.OUTPUT_FILE
+    recipient_filepath = os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME)
+    ouput_filepath = os.path.join(config.filepath.OUTPUT_DIR, config.filepath.OUTPUT_NAME)
 
     with laspy.open(recipient_filepath) as recipient_file:
         recipient_fields_list = get_field_from_header(recipient_file)
@@ -156,9 +161,9 @@ def append_points(config: DictConfig, extra_points: pd.DataFrame):
     # if we want a new column, we start by adding its name
     if config.NEW_COLUMN:
-        if test_field_exists(config.filepath.RECIPIENT_FILE, config.NEW_COLUMN):
+        if test_field_exists(recipient_filepath, config.NEW_COLUMN):
             raise ValueError(f"{config.NEW_COLUMN} already exists as \
-                             column name in {config.filepath.RECIPIENT_FILE}")
+                             column name in {recipient_filepath}")
         new_column_type = get_type(config.NEW_COLUMN_SIZE)
         output_las = laspy.read(ouput_filepath)
         output_las.add_extra_dim(laspy.ExtraBytesParams(name=config.NEW_COLUMN, type=new_column_type))
@@ -197,15 +202,26 @@ def get_donor_from_csv(recipient_file_path: str, csv_file_path: str) -> str:
         return donor_file_paths.iloc[0]  # there should be only one donor file for a given recipient file
     return ""
 
-def patchwork(config: DictConfig):
-
-    # if there is a csv_file, we don't use the DONOR_FILE defined by the config but
-    # the file matching the recipient file from the csv file
+def get_donor_path(config: DictConfig) -> Tuple[str, str]:
+    """Return a donor directory and a name:
+    If there is no csv file provided in config, return DONOR_DIRECTORY and DONOR_NAME
+    If there is a csv file provided, return DONOR_DIRECTORY and DONOR_NAME matching the given RECIPIENT
+    If there is a csv file provided but no matching DONOR, return "" twice
+    """
     if config.filepath.CSV_DIRECTORY and config.filepath.CSV_NAME:
         csv_file_path = os.path.join(config.filepath.CSV_DIRECTORY, config.filepath.CSV_NAME)
-        config.filepath.DONOR_FILE = get_donor_from_csv(config.filepath.RECIPIENT_FILE, csv_file_path)
-        if not config.filepath.DONOR_FILE:  # if there is no matching donor file, we do nothing
-            return
+        recipient_file_path = os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME)
+        donor_file_path = get_donor_from_csv(recipient_file_path, csv_file_path)
+        if not donor_file_path:  # if there is no matching donor file, we do nothing
+            return "", ""
+        return str(Path(donor_file_path).parent), str(Path(donor_file_path).name)
+    return config.filepath.DONOR_DIRECTORY, config.filepath.DONOR_NAME
+
+
+def patchwork(config: DictConfig):
+    _, donor_name = get_donor_path(config)
+    if not donor_name:
+        return
+
     complementary_bd_points = get_complementary_points(config)
     append_points(config, complementary_bd_points)
     create_indices_map(config, complementary_bd_points)
diff --git a/test/test_indices_map.py b/test/test_indices_map.py
index b439bba..27bbc88 100644
--- a/test/test_indices_map.py
+++ b/test/test_indices_map.py
@@ -1,4 +1,5 @@
 import sys
+import os
 
 from hydra import compose, initialize
 import numpy as np
@@ -41,20 +42,23 @@ def test_create_indices_points():
 
 def test_create_indices_map(tmp_path_factory):
-    tmp_file_path = tmp_path_factory.mktemp("data") / "indices.tif"
+    tmp_file_dir = tmp_path_factory.mktemp("data")
+    tmp_file_name = "indices.tif"
+
     with initialize(version_base="1.2", config_path="../configs"):
         config = compose(
             config_name="configs_patchwork.yaml",
             overrides=[
                 f"PATCH_SIZE={PATCH_SIZE}",
                 f"TILE_SIZE={TILE_SIZE}",
-                f"filepath.OUTPUT_INDICES_MAP={tmp_file_path}",
+                f"filepath.OUTPUT_INDICES_MAP_DIR={tmp_file_dir}",
+                f"filepath.OUTPUT_INDICES_MAP_NAME={tmp_file_name}",
             ]
         )
 
     df_points = pd.DataFrame(data=DATA_POINTS)
     create_indices_map(config, df_points)
 
-    raster = rs.open(tmp_file_path)
+    raster = rs.open(os.path.join(tmp_file_dir, tmp_file_name))
     grid = raster.read()
     grid = grid.transpose()  # indices aren't read the way we want otherwise
@@ -66,7 +70,8 @@ def test_create_indices_map(tmp_path_factory):
 
 def test_read_indices_map(tmp_path_factory):
-    tmp_file_path = tmp_path_factory.mktemp("data") / "indices.tif"
+    tmp_file_dir = tmp_path_factory.mktemp("data")
+    tmp_file_name = "indices.tif"
 
     with initialize(version_base="1.2", config_path="../configs"):
         config = compose(
             config_name="configs_patchwork.yaml",
             overrides=[
                 f"PATCH_SIZE={PATCH_SIZE}",
                 f"TILE_SIZE={TILE_SIZE}",
-                f"filepath.INPUT_INDICES_MAP={tmp_file_path}",
+                f"filepath.INPUT_INDICES_MAP_DIR={tmp_file_dir}",
+                f"filepath.INPUT_INDICES_MAP_NAME={tmp_file_name}",
             ]
         )
 
     grid = np.array([[1, 1, 1],
                      [1, 1, 1],])
     transform = from_origin(0, 3, config.PATCH_SIZE, config.PATCH_SIZE)
-    indices_map = rs.open(config.filepath.INPUT_INDICES_MAP,
+    input_indices_map_path = os.path.join(config.filepath.INPUT_INDICES_MAP_DIR, config.filepath.INPUT_INDICES_MAP_NAME)
+    indices_map = rs.open(input_indices_map_path,
                           'w',
                           driver='GTiff',
                           height=grid.shape[0],
diff --git a/test/test_patchwork.py b/test/test_patchwork.py
index 8af6da0..e1e8300 100644
--- a/test/test_patchwork.py
+++ b/test/test_patchwork.py
@@ -1,4 +1,5 @@
 import 
sys +import os import pytest from hydra import compose, initialize @@ -11,11 +12,13 @@ import constants as c from patchwork import get_complementary_points, get_field_from_header, get_selected_classes_points -from patchwork import get_type, append_points, get_donor_from_csv +from patchwork import get_type, append_points, get_donor_from_csv, get_donor_path from tools import get_tile_origin_from_pointcloud from constants import CLASSIFICATION_STR -RECIPIENT_TEST_PATH = "test/data/recipient_test.laz" +RECIPIENT_TEST_DIR = "test/data/" +RECIPIENT_TEST_NAME = "recipient_test.laz" + DONOR_CLASS_LIST = [2, 9] RECIPIENT_CLASS_LIST = [2, 3, 9, 17] VIRTUAL_CLASS_TRANSLATION = {2: 69, 9: 70} @@ -25,19 +28,25 @@ NEW_COLUMN_SIZE = 8 VALUE_ADDED_POINTS = 1 -DONOR_TEST_PATH = "test/data/donor_test.las" +DONOR_TEST_DIR = "test/data/" +DONOR_TEST_NAME = "donor_test.las" + +RECIPIENT_MORE_FIELDS_TEST_DIR = "test/data" +RECIPIENT_MORE_FIELDS_TEST_NAME = "recipient_more_fields_test.laz" + +DONOR_MORE_FIELDS_TEST_DIR = "test/data" +DONOR_MORE_FIELDS_TEST_NAME = "donor_more_fields_test.las" -RECIPIENT_MORE_FIELDS_TEST_PATH = "test/data/recipient_more_fields_test.laz" -DONOR_MORE_FIELDS_TEST_PATH = "test/data/donor_more_fields_test.las" +RECIPIENT_SLIDED_TEST_DIR = "test/data" +RECIPIENT_SLIDED_TEST_NAME = "recipient_slided_test.laz" -RECIPIENT_SLIDED_TEST_PATH = "test/data/recipient_slided_test.laz" COORDINATES = "1234_6789" def test_get_field_from_header(): - with laspy.open(RECIPIENT_TEST_PATH) as recipient_file: + with laspy.open(os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME)) as recipient_file: recipient_fields_list = get_field_from_header(recipient_file) assert len(recipient_fields_list) == 18 # check if all fields are lower case @@ -50,12 +59,13 @@ def test_get_selected_classes_points(): config = compose( config_name="configs_patchwork.yaml", overrides=[ - f"filepath.RECIPIENT_FILE={RECIPIENT_TEST_PATH}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_TEST_NAME}", f"RECIPIENT_CLASS_LIST={RECIPIENT_CLASS_LIST}" ] ) - with laspy.open(config.filepath.RECIPIENT_FILE) as recipient_file: + with laspy.open(os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME)) as recipient_file: recipient_points = recipient_file.read().points tile_origin_recipient = get_tile_origin_from_pointcloud(config, recipient_points) @@ -76,8 +86,10 @@ def test_get_complementary_points(): config = compose( config_name="configs_patchwork.yaml", overrides=[ - f"filepath.DONOR_FILE={DONOR_TEST_PATH}", - f"filepath.RECIPIENT_FILE={RECIPIENT_TEST_PATH}", + f"filepath.DONOR_DIRECTORY={DONOR_TEST_DIR}", + f"filepath.DONOR_NAME={DONOR_TEST_NAME}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_TEST_NAME}", f"DONOR_CLASS_LIST={DONOR_CLASS_LIST}", f"RECIPIENT_CLASS_LIST={RECIPIENT_CLASS_LIST}", f"+VIRTUAL_CLASS_TRANSLATION={VIRTUAL_CLASS_TRANSLATION}", @@ -91,23 +103,25 @@ def test_get_complementary_points(): def test_get_complementary_points_2(): """test selected_classes_points with more fields in files, different from each other's""" extra_fields_for_recipient = ["f1", "f2"] - las = laspy.read(RECIPIENT_TEST_PATH) + las = laspy.read(os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME)) for field in extra_fields_for_recipient: las.add_extra_dim(laspy.ExtraBytesParams(name=field, type=np.uint64)) - las.write(RECIPIENT_MORE_FIELDS_TEST_PATH) + las.write(os.path.join(RECIPIENT_MORE_FIELDS_TEST_DIR, 
RECIPIENT_MORE_FIELDS_TEST_NAME)) extra_fields_for_donor = ["f3", "f4"] - las = laspy.read(DONOR_TEST_PATH) + las = laspy.read(os.path.join(DONOR_TEST_DIR, DONOR_TEST_NAME)) for field in extra_fields_for_donor: las.add_extra_dim(laspy.ExtraBytesParams(name=field, type=np.uint64)) - las.write(DONOR_MORE_FIELDS_TEST_PATH) + las.write(os.path.join(DONOR_MORE_FIELDS_TEST_DIR, DONOR_MORE_FIELDS_TEST_NAME)) with initialize(version_base="1.2", config_path="../configs"): config = compose( config_name="configs_patchwork.yaml", overrides=[ - f"filepath.DONOR_FILE={DONOR_MORE_FIELDS_TEST_PATH}", - f"filepath.RECIPIENT_FILE={RECIPIENT_MORE_FIELDS_TEST_PATH}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_MORE_FIELDS_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_MORE_FIELDS_TEST_NAME}", + f"filepath.DONOR_DIRECTORY={DONOR_MORE_FIELDS_TEST_DIR}", + f"filepath.DONOR_NAME={DONOR_MORE_FIELDS_TEST_NAME}", f"DONOR_CLASS_LIST={DONOR_CLASS_LIST}", f"RECIPIENT_CLASS_LIST={RECIPIENT_CLASS_LIST}", f"+VIRTUAL_CLASS_TRANSLATION={VIRTUAL_CLASS_TRANSLATION}", @@ -130,14 +144,16 @@ def test_get_complementary_points_3(): config = compose( config_name="configs_patchwork.yaml", overrides=[ - f"filepath.DONOR_FILE={DONOR_TEST_PATH}", - f"filepath.RECIPIENT_FILE={RECIPIENT_SLIDED_TEST_PATH}", + f"filepath.DONOR_DIRECTORY={DONOR_TEST_DIR}", + f"filepath.DONOR_NAME={DONOR_TEST_NAME}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_SLIDED_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_SLIDED_TEST_NAME}", ] ) - las = laspy.read(RECIPIENT_TEST_PATH) + las = laspy.read(os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME)) las.points['x'] = las.points['x'] + config.TILE_SIZE - las.write(RECIPIENT_SLIDED_TEST_PATH) + las.write(os.path.join(RECIPIENT_SLIDED_TEST_DIR, RECIPIENT_SLIDED_TEST_NAME)) with pytest.raises(Exception): get_complementary_points(config) @@ -158,33 +174,39 @@ def get_point_count(file_path): def test_append_points(tmp_path_factory): - tmp_file_path = tmp_path_factory.mktemp("data") / "result.laz" + tmp_file_dir = tmp_path_factory.mktemp("data") + tmp_file_name = "result.laz" with initialize(version_base="1.2", config_path="../configs"): config = compose( config_name="configs_patchwork.yaml", overrides=[ - f"filepath.RECIPIENT_FILE={RECIPIENT_TEST_PATH}", - f"filepath.OUTPUT_FILE={tmp_file_path}" + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_TEST_NAME}", + f"filepath.OUTPUT_DIR={tmp_file_dir}", + f"filepath.OUTPUT_NAME={tmp_file_name}", ] ) + recipient_file_path = os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME) + output_file = os.path.join(config.filepath.OUTPUT_DIR, config.filepath.OUTPUT_NAME) + # add 2 points extra_points = pd.DataFrame(data=[POINT_1, POINT_2]) append_points(config, extra_points) # assert a point has been added - point_count = get_point_count(config.filepath.RECIPIENT_FILE) - assert get_point_count(config.filepath.OUTPUT_FILE) == point_count + 2 + point_count = get_point_count(recipient_file_path) + assert get_point_count(output_file) == point_count + 2 # assert fields are the same - fields_recipient = get_field_from_header(laspy.read(config.filepath.RECIPIENT_FILE)) - fields_output = get_field_from_header(laspy.read(config.filepath.OUTPUT_FILE)) + fields_recipient = get_field_from_header(laspy.read(recipient_file_path)) + fields_output = get_field_from_header(laspy.read(output_file)) assert set(fields_recipient) == set(fields_output) # assert all points are here - las_recipient = 
laspy.read(config.filepath.RECIPIENT_FILE) - las_output = laspy.read(config.filepath.OUTPUT_FILE) + las_recipient = laspy.read(recipient_file_path) + las_output = laspy.read(output_file) for point in las_recipient.points[:10]: # only 10 points, otherwise it takes too long assert point in las_output.points @@ -193,47 +215,51 @@ def test_append_points(tmp_path_factory): append_points(config, extra_points) # assert a point has been added - point_count = get_point_count(config.filepath.RECIPIENT_FILE) - assert get_point_count(config.filepath.OUTPUT_FILE) == point_count + 1 + point_count = get_point_count(recipient_file_path) + assert get_point_count(output_file) == point_count + 1 # # add 0 point extra_points = pd.DataFrame(data={'x': [], 'y': [], 'z': [], CLASSIFICATION_STR: []}) append_points(config, extra_points) # assert a point has been added - point_count = get_point_count(config.filepath.RECIPIENT_FILE) - assert get_point_count(config.filepath.OUTPUT_FILE) == point_count + point_count = get_point_count(recipient_file_path) + assert get_point_count(output_file) == point_count def test_append_points_new_column(tmp_path_factory): - tmp_file_path = tmp_path_factory.mktemp("data") / "result.laz" + tmp_file_dir = tmp_path_factory.mktemp("data") + tmp_file_name = "result.laz" with initialize(version_base="1.2", config_path="../configs"): config = compose( config_name="configs_patchwork.yaml", overrides=[ - f"filepath.RECIPIENT_FILE={RECIPIENT_TEST_PATH}", - f"filepath.OUTPUT_FILE={tmp_file_path}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_TEST_NAME}", + f"filepath.OUTPUT_DIR={tmp_file_dir}", + f"filepath.OUTPUT_NAME={tmp_file_name}", f"NEW_COLUMN={NEW_COLUMN}", f"NEW_COLUMN_SIZE={NEW_COLUMN_SIZE}", f"VALUE_ADDED_POINTS={VALUE_ADDED_POINTS}" ] ) + output_file = os.path.join(config.filepath.OUTPUT_DIR, config.filepath.OUTPUT_NAME) # add 2 points extra_points = pd.DataFrame(data=[POINT_1, POINT_2]) append_points(config, extra_points) # assert a point has been added - point_count = get_point_count(config.filepath.RECIPIENT_FILE) - assert get_point_count(config.filepath.OUTPUT_FILE) == point_count + 2 + point_count = get_point_count(os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME)) + assert get_point_count(output_file) == point_count + 2 # assert the new column is here - fields_output = get_field_from_header(laspy.read(config.filepath.OUTPUT_FILE)) + fields_output = get_field_from_header(laspy.read(output_file)) assert NEW_COLUMN in fields_output # assert both points added, and only them, have NEW_COLUMN == VALUE_ADDED_POINTS - las_output = laspy.read(config.filepath.OUTPUT_FILE) + las_output = laspy.read(output_file) new_column = las_output.points[NEW_COLUMN] assert new_column[-1] == VALUE_ADDED_POINTS assert new_column[-2] == VALUE_ADDED_POINTS @@ -241,11 +267,91 @@ def test_append_points_new_column(tmp_path_factory): def test_get_donor_from_csv(tmp_path_factory): csv_file_path = tmp_path_factory.mktemp("csv") / "recipients_donors_links.csv" + donor_more_fields_test_path = os.path.join(DONOR_MORE_FIELDS_TEST_DIR, DONOR_MORE_FIELDS_TEST_NAME) + recipient_more_fields_test_path = os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME) data = {c.COORDINATES_KEY: [COORDINATES, ], - c.DONOR_FILE_KEY: [DONOR_MORE_FIELDS_TEST_PATH, ], - c.RECIPIENT_FILE_KEY: [RECIPIENT_MORE_FIELDS_TEST_PATH, ] + c.DONOR_FILE_KEY: [donor_more_fields_test_path, ], + c.RECIPIENT_FILE_KEY: [recipient_more_fields_test_path, ] } 
DataFrame(data=data).to_csv(csv_file_path) - donor_file_path = get_donor_from_csv(RECIPIENT_MORE_FIELDS_TEST_PATH, csv_file_path) - assert donor_file_path == DONOR_MORE_FIELDS_TEST_PATH + donor_file_path = get_donor_from_csv(recipient_more_fields_test_path, csv_file_path) + assert donor_file_path == donor_more_fields_test_path + +def test_get_donor_path(tmp_path_factory): + # check get_donor_path when no csv + with initialize(version_base="1.2", config_path="../configs"): + config = compose( + config_name="configs_patchwork.yaml", + overrides=[ + f"filepath.DONOR_DIRECTORY={DONOR_TEST_DIR}", + f"filepath.DONOR_NAME={DONOR_TEST_NAME}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_SLIDED_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_SLIDED_TEST_NAME}", + ] + ) + donor_dir, donor_name = get_donor_path(config) + assert donor_dir == DONOR_TEST_DIR + assert donor_name == DONOR_TEST_NAME + + # check get_donor_path when csv but no matching donor in it + csv_file_dir = tmp_path_factory.mktemp("csv") + csv_file_name = "recipients_donors_links.csv" + csv_file_path = os.path.join(csv_file_dir, csv_file_name) + + data = {c.COORDINATES_KEY: [], + c.DONOR_FILE_KEY: [], + c.RECIPIENT_FILE_KEY: [] + } + DataFrame(data=data).to_csv(csv_file_path) + + with initialize(version_base="1.2", config_path="../configs"): + config = compose( + config_name="configs_patchwork.yaml", + overrides=[ + f"filepath.DONOR_DIRECTORY={DONOR_TEST_DIR}", + f"filepath.DONOR_NAME={DONOR_TEST_NAME}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_SLIDED_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_SLIDED_TEST_NAME}", + f"filepath.CSV_DIRECTORY={csv_file_dir}", + f"filepath.CSV_NAME={csv_file_name}", + ] + ) + + donor_dir, donor_name = get_donor_path(config) + assert donor_dir == "" + assert donor_name == "" + + # check get_donor_path when csv but with a matching donor in it + donor_more_fields_test_path = os.path.join(DONOR_MORE_FIELDS_TEST_DIR, DONOR_MORE_FIELDS_TEST_NAME) + recipient_more_fields_test_path = os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME) + data = {c.COORDINATES_KEY: [COORDINATES, ], + c.DONOR_FILE_KEY: [donor_more_fields_test_path, ], + c.RECIPIENT_FILE_KEY: [recipient_more_fields_test_path, ] + } + DataFrame(data=data).to_csv(csv_file_path) + + with initialize(version_base="1.2", config_path="../configs"): + config = compose( + config_name="configs_patchwork.yaml", + overrides=[ + f"filepath.DONOR_DIRECTORY={DONOR_TEST_DIR}", + f"filepath.DONOR_NAME={DONOR_TEST_NAME}", + f"filepath.RECIPIENT_DIRECTORY={RECIPIENT_TEST_DIR}", + f"filepath.RECIPIENT_NAME={RECIPIENT_TEST_NAME}", + f"filepath.CSV_DIRECTORY={csv_file_dir}", + f"filepath.CSV_NAME={csv_file_name}", + ] + ) + + donor_dir, donor_name = get_donor_path(config) + assert donor_dir == DONOR_MORE_FIELDS_TEST_DIR + assert donor_name == DONOR_MORE_FIELDS_TEST_NAME + + # donor_more_fields_test_path = os.path.join(DONOR_MORE_FIELDS_TEST_DIR, DONOR_MORE_FIELDS_TEST_NAME) + # recipient_more_fields_test_path = os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME) + # data = {c.COORDINATES_KEY: [COORDINATES, ], + # c.DONOR_FILE_KEY: [donor_more_fields_test_path, ], + # c.RECIPIENT_FILE_KEY: [recipient_more_fields_test_path, ] + # } + # DataFrame(data=data).to_csv(csv_file_path) diff --git a/version.py b/version.py index b544b70..796809f 100644 --- a/version.py +++ b/version.py @@ -1,4 +1,4 @@ -__version__ = "1.1.0" +__version__ = "1.2.0" if __name__ == "__main__": print(__version__) From 12f51d740bdc34100606646288e4765acabed6d0 Mon Sep 17 00:00:00 2001 
From: Michel Daab
Date: Tue, 29 Oct 2024 16:00:56 +0100
Subject: [PATCH 6/9] changed version number

---
 CHANGELOG.md | 5 +----
 version.py | 2 +-
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3235836..0f4b408 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,7 @@
 # CHANGELOG
 
-## 1.2.0
-- coupure des chemins de fichiers en chemins de répertoires/nom de fichiers pour pouvoir les utiliser sur docker + store
-- meilleure vérification du fichier donneur via csv
-
 ## 1.1.0
 - modification de chemin pour pouvoir passer dans la gpao
+- coupure des chemins de fichiers en chemins de répertoires/nom de fichiers pour pouvoir les utiliser sur docker + store
 - patchwork vérifie maintenant s'il y a un fichier csv en entrée. Si c'est le cas, le fichier donneur utilisé est celui qui correspond au fichier receveur dans le fichier csv. S'il n'y a pas de fichier donneur correspondant, patchwork termine sans rien faire
 
 ## 1.0.0
diff --git a/version.py b/version.py
index 796809f..b544b70 100644
--- a/version.py
+++ b/version.py
@@ -1,4 +1,4 @@
-__version__ = "1.2.0"
+__version__ = "1.1.0"
 
 if __name__ == "__main__":
     print(__version__)

From d68255ebda99ef67cddfb593693a2c836877120c Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Wed, 30 Oct 2024 07:43:09 +0100
Subject: [PATCH 7/9] remove comment

---
 configs/configs_patchwork.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/configs/configs_patchwork.yaml b/configs/configs_patchwork.yaml
index c377052..b556c60 100644
--- a/configs/configs_patchwork.yaml
+++ b/configs/configs_patchwork.yaml
@@ -25,7 +25,6 @@ filepath:
   RECIPIENT_DIRECTORY: null # directory containing all potential recipient lidar files, for lidar selecter
   OUTPUT_DIRECTORY: null # directory receiving the selected/cut lidar files, for lidar selecter
 
-  # OUTPUT_FILE: null # path to the (resulting) file with added points.
   OUTPUT_DIR: null # directory of the file with added points, from patchwork.
   OUTPUT_NAME: null # name of the file with added points, from patchwork.

From 554691a8fb878a235a920ad08750f996f8af127f Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Wed, 30 Oct 2024 07:49:25 +0100
Subject: [PATCH 8/9] removed comment

---
 test/test_patchwork.py | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/test/test_patchwork.py b/test/test_patchwork.py
index d5d695d..760239c 100644
--- a/test/test_patchwork.py
+++ b/test/test_patchwork.py
@@ -347,11 +347,3 @@ def test_get_donor_path(tmp_path_factory):
     donor_dir, donor_name = get_donor_path(config)
     assert donor_dir == DONOR_MORE_FIELDS_TEST_DIR
     assert donor_name == DONOR_MORE_FIELDS_TEST_NAME
-
-    # donor_more_fields_test_path = os.path.join(DONOR_MORE_FIELDS_TEST_DIR, DONOR_MORE_FIELDS_TEST_NAME)
-    # recipient_more_fields_test_path = os.path.join(RECIPIENT_TEST_DIR, RECIPIENT_TEST_NAME)
-    # data = {c.COORDINATES_KEY: [COORDINATES, ],
-    #         c.DONOR_FILE_KEY: [donor_more_fields_test_path, ],
-    #         c.RECIPIENT_FILE_KEY: [recipient_more_fields_test_path, ]
-    #         }
-    # DataFrame(data=data).to_csv(csv_file_path)

From 1b35a2963bd03e190e30aec74a21ae862dc47afb Mon Sep 17 00:00:00 2001
From: Michel Daab
Date: Wed, 30 Oct 2024 11:15:53 +0100
Subject: [PATCH 9/9] when no matching donor in the csv, copy the recipient
 without doing anything

---
 patchwork.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/patchwork.py b/patchwork.py
index 5d57bcb..b9eea01 100644
--- a/patchwork.py
+++ b/patchwork.py
@@ -220,7 +220,10 @@ def patchwork(config: DictConfig):
     _, donor_name = get_donor_path(config)
-    if not donor_name:
+    if not donor_name:  # if no matching donor, we simply copy the recipient to the output without doing anything
+        recipient_filepath = os.path.join(config.filepath.RECIPIENT_DIRECTORY, config.filepath.RECIPIENT_NAME)
+        output_filepath = os.path.join(config.filepath.OUTPUT_DIR, config.filepath.OUTPUT_NAME)
+        copy2(recipient_filepath, output_filepath)
         return
 
     complementary_bd_points = get_complementary_points(config)
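
For reference, the behaviour the series converges on — resolve the donor file from the CSV written by lidar_selecter.py, and fall back to copying the recipient through untouched when no donor matches (patch 9) — can be sketched in a few self-contained lines. This is a minimal illustration, not the project's code: the column names "donor_file" and "recipient_file" are assumed stand-ins for the real values of c.DONOR_FILE_KEY and c.RECIPIENT_FILE_KEY, which live in constants.py and are not shown in this patch series, and patchwork_or_passthrough is a hypothetical driver, not a function from the repo.

```python
# Hedged sketch of the donor-resolution flow from patches 4-9.
# Assumption: the CSV columns are named "donor_file" and "recipient_file";
# the real key names come from constants.py, which this series does not show.
from shutil import copy2

import pandas as pd

DONOR_FILE_KEY = "donor_file"          # assumed value of c.DONOR_FILE_KEY
RECIPIENT_FILE_KEY = "recipient_file"  # assumed value of c.RECIPIENT_FILE_KEY


def get_donor_from_csv(recipient_file_path: str, csv_file_path: str) -> str:
    """Return the donor path matching the recipient, or "" if there is none."""
    df_csv_data = pd.read_csv(csv_file_path)
    donor_file_paths = df_csv_data.loc[
        df_csv_data[RECIPIENT_FILE_KEY] == recipient_file_path, DONOR_FILE_KEY
    ]
    # .iloc[0] is positional: after filtering, the first match does not
    # necessarily carry the index label 0
    return donor_file_paths.iloc[0] if len(donor_file_paths) > 0 else ""


def patchwork_or_passthrough(recipient_path: str, csv_path: str, output_path: str) -> None:
    """Hypothetical driver applying the patch-9 rule."""
    donor_path = get_donor_from_csv(recipient_path, csv_path)
    if not donor_path:
        copy2(recipient_path, output_path)  # no matching donor: output == recipient
        return
    # a real run would now call get_complementary_points / append_points /
    # create_indices_map with the donor and recipient files
    print(f"would add points from {donor_path} to {recipient_path} -> {output_path}")
```

If the CSV produced by lidar_selecter.py uses different key strings, only the two module-level constants in the sketch need to change.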