diff --git a/.ebextensions/aws-autoscaling.config b/.ebextensions/aws-autoscaling.config index 44bd2e5e1..25ef134e3 100644 --- a/.ebextensions/aws-autoscaling.config +++ b/.ebextensions/aws-autoscaling.config @@ -1,6 +1,6 @@ option_settings: aws:ec2:instances: - InstanceTypes: r5.large + InstanceTypes: m6i.2xlarge aws:autoscaling:asg: Availability Zones: Any MaxSize: 1 diff --git a/.github/workflows/github-actions-PR-validation.yml b/.github/workflows/github-actions-PR-validation.yml index 8509ac86e..0f5e97f98 100644 --- a/.github/workflows/github-actions-PR-validation.yml +++ b/.github/workflows/github-actions-PR-validation.yml @@ -75,10 +75,10 @@ jobs: run: | git fetch -q origin ${{ github.base_ref }} ${{ github.head_ref }} git diff --name-only origin/${{ github.base_ref }} origin/${{ github.head_ref }} - - name: Setup Node.js (v16) + - name: Setup Node.js (v18) uses: actions/setup-node@v3 with: - node-version: 16 + node-version: 18 - name: Report Node version run: | node -v diff --git a/Dockerfile b/Dockerfile index 78366b56e..66010e5fd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ ARG OVERWRITE_VERSION ### Stage 1: build UI -FROM node:16 AS BUILD_UI_STAGE +FROM node:18 AS BUILD_UI_STAGE WORKDIR /agr_curation COPY src/main/cliapp ./cliapp @@ -48,4 +48,4 @@ ENV QUARKUS_HIBERNATE_SEARCH_ORM_ELASTICSEARCH_HOSTS opensearch:9200 ENV QUARKUS_HIBERNATE_SEARCH_ORM_ELASTICSEARCH_PROTOCOL http # Start the application -CMD ["java", "-Xmx15g", "-jar", "agr_curation_api-runner.jar"] +CMD ["java", "-Xms30g", "-Xmx30g", "-jar", "agr_curation_api-runner.jar"] diff --git a/README.md b/README.md index ed009814f..670bf24dc 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ These instructions will get you a copy of the project and the API up and running * [Additional deployment steps](#additional-deployment-steps) * [Release versioning](#release-versioning) * [Release Creation](#release-creation) + * [Database](#database) - [Loading Data](#loading-data) - 
[Submitting Data](#submitting-data) @@ -443,27 +444,41 @@ after merging into the respective branch to trigger deployment and to ensure the new version of the application can function in a consistent state upon and after deployment. 1. Compare the environment variables set in the Elastic Beanstalk environment between the environment you want to deploy to and from (e.g. compare curation-beta to curation-alpha for deployment to beta, or curation-production to curation-beta for deployment to production). This can be done through the [EB console](https://console.aws.amazon.com/elasticbeanstalk/home?region=us-east-1#/application/overview?applicationName=curation-app), or by using the `eb printenv` CLI. Scan for relevant change: - * New variables should be added to the environment to be deployed to, **before** initiating the deployment - * ENV-specific value changes should be ignored (for example, datasource host will be different for each) - * Other variable value changes should be propagated as appropriate, **before** initiating the deployment - * Removed variables should be cleaned up **after** successfull deployment -2. Connect to the Environment's Elastic search domain by entering its domain endpoint in Cerebro, and delete all indexes. - The domain endpoint URL can be found through the [Amazon OpenSearch console](https://console.aws.amazon.com/esv3/home?region=us-east-1#opensearch/domains), the cerebro UI is available on the application server through HTTP at port 9000. -3. When wanting to deploy a prerelease to the beta environment, reset the beta postgres DB and roll down the latest production DB backup - (see the [agr_db_backups README](https://github.com/alliance-genome/agr_db_backups#manual-invocation)). 
+ * New variables should be added to the environment to be deployed to, **before** initiating the deployment + * ENV-specific value changes should be ignored (for example, datasource host will be different for each) + * Other variable value changes should be propagated as appropriate, **before** initiating the deployment + * Removed variables should be cleaned up **after** successful deployment +2. Connect to the Environment's search domain and delete all indexes. A link to the Cerebro view into each environment's search indexes is available in the curation interface under `Other Links` > `Elastic Search UI` (VPN connection required). + Alternatively, you can reach this UI manually by browsing to the [AGR Cerebro interface](http://cerebro.alliancegenome.org:9000) and entering the environment's domain endpoint manually. The domain endpoint URL can be found through the [Amazon OpenSearch console](https://us-east-1.console.aws.amazon.com/aos/home?region=us-east-1#opensearch/domains). +3. When wanting to deploy a prerelease to the beta environment, reset the beta postgres DB and roll down the latest production DB backup. This must be done to catch any potential problems that could be caused by new data available only on the production environment, before the code causing it would get deployed to the production environment. - To ensure users or loads can not write to the database while it is being reloaded, stop the (beta) application before - initiating the DB roll-down and restart it once the DB roll-down completed. 
- ```bash - > eb ssh curation-beta -e 'ssh -i $AGR_SSH_PEM_FILE' #Connect to relevant application server - > cd /var/app/current - > sudo docker-compose stop #Stop the application - #Trigger DB roll-down locally and wait for completion - > sudo docker-compose start #(Re)start the application - ``` + The restore function automatically prevents users from writing to the database while it is being reloaded, + by temporarily making the target database read-only and restoring in a separate database before renaming. + 1. For instructions on how to trigger the restore, see the [agr_db_backups README](https://github.com/alliance-genome/agr_db_backups#manual-invocation) + 2. After the restore completed, restart the beta environment app-server to re-apply all flyway migrations + that were not yet present in the restored (production) database. + ```bash + > aws elasticbeanstalk restart-app-server --environment-name curation-beta + ``` + 3. Check the logs for errors after app-server restart, which could indicate a DB restore failure + and troubleshoot accordingly if necessary to fix any errors. 4. Tag and create the release in git and gitHub, as described in the [Release creation](#release-creation) section. -5. Check the logs for the environment that you're releasing too and ensure that all migrations complete successfully. +5. Check the logs for the environment that you're releasing to and ensure the application started successfully + and that all flyway migrations completed successfully. If errors occurred, troubleshoot and fix accordingly. 
+ * If the application failed to start due to incompatible indexes (the indexes were not deleted before deployment), + delete the indexes and restart the app-server by running + ```bash + > aws elasticbeanstalk restart-app-server --environment-name curation- + ``` + * If any of the migrations failed, try restarting the application once more, + or create a new release to fix the failing migration if the second restart did not resolve the issue: + 1. Create a new `release/v*` branch from the beta or production branch as appropriate. + * Update to the next release candidate (`a` in `vx.y.z-rca`) for fixes to beta, using the beta branch as starting commit + * Update to the next patch release (`z` in `vx.y.z`) for fixes to production, using the production branch as starting commit + 2. Fix the required migration files + 3. Create a PR to merge the new `release/v*` branch back into beta/production + 4. repeat the deployment process for this new release (candidate) from the [additional-deployment-steps](#additional-deployment-steps) step 5 (release creation). 6. Reindex all data types by calling the `system/reindexeverything` endpoint with default arguments (found in the System Endpoints section in the swagger UI) and follow-up through the log server to check for progress and errors. 7. Once reindexing completed, look at the dashboard page (where you deployed to) @@ -525,6 +540,35 @@ Once published, github actions kicks in and the release will get deployed to the Completion of these deployments is reported in the #a-team-code slack channel. After receiving a successful deployment notification, continue the remaining steps described in the [additional deployment steps section](#additional-deployment-steps). +### Database +The Curation application connects to the postgres DB using a user called `curation_app`, which is member of a role called `curation_admins`. 
+This role and user are used to ensure the database can be closed for all but admin users on initiating a DB restore, +such that no accidental writes can happen to the postgres database during the restore process to ensure data integrity. +When restoring to or creating a new postgres server, ensure the `curation_admins` role exists and has a member user called `curation_app`, + to ensure the database can be restored with the correct ownerships, and give the `curation_admins` role all permissions to + the curation database, the `public` schema, and all tables and sequences in it, and change the default privileges to allow + the curation_admins role all permissions on all newly created tables and sequences in the `public` schema. + +This can be achieved by connecting to the curation database using the admin (postgres) user (using `psql`) +and executing the following queries: +```sql +-- Create the role +CREATE ROLE curation_admins; + +-- Create user (change the password) +CREATE USER curation_app WITH PASSWORD '...'; +-- Grant role to user +GRANT curation_admins TO curation_app; + +-- Grant required privileges to group (role) +GRANT ALL ON DATABASE curation TO GROUP curation_admins; +GRANT ALL ON SCHEMA public TO GROUP curation_admins; +GRANT ALL ON ALL TABLES IN SCHEMA public TO GROUP curation_admins; +GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO GROUP curation_admins; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO GROUP curation_admins; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO GROUP curation_admins; +``` + ## Loading Data Assuming the API is running on localhost, check the Swagger interface for the correct endpoint to post data to. Here is an example of loading the Gene BGI file from the Alliance. 
diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 5afb71cf1..b594daccf 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -2,6 +2,21 @@ https://agr-jira.atlassian.net/wiki/spaces/ATEAM/overview +## v0.21.0 + * New features + * Duplicate Disease Annotation popup (SCRUM-2911) + * Updated display of ontology terms in Tree View (SCRUM-3021) + * Improved display of notes popups (SCRUM-3005) + * Switch to single consensus set of defining properties for disease annotations (SCRUM-3008) + * Loaded Vertebrate Trait (VT) Ontology (SCRUM-2453) + * Loaded Ascomycete Phenotype Ontology (APO) (SCRUM-2447) + * Loaded Measurement Method Ontology (MMO) (SCRUM-1206) + * Added Data Provider column to Alleles table (SCRUM-3006) + * Added Data Provider column to Genes and AGMs tables (SCRUM-3007) + * Prevented loading of duplicate notes (SCRUM-2828) + * Fixes and maintenance + * Allele indexing optimizations (SCRUM-2982) + ## v0.20.1 * Fixes * Temporary endpoint to reset data provider on all disease annotations (SCRUM-3037) diff --git a/docker/run_os_cluster b/docker/run_os_cluster new file mode 100755 index 000000000..3585305db --- /dev/null +++ b/docker/run_os_cluster @@ -0,0 +1,23 @@ +docker rm -f os01 os02 os03 os04 + +NODES=os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15,os16 + +docker run -it --rm --net curation -p 9200:9200 -p 9300:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os00 --name os00 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9201:9200 -p 9301:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e 
discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os01 --name os01 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9202:9200 -p 9302:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os02 --name os02 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9203:9200 -p 9303:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os03 --name os03 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9204:9200 -p 9304:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os04 --name os04 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9205:9200 -p 9305:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os05 --name os05 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9206:9200 -p 9306:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e 
node.name=os06 --name os06 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9207:9200 -p 9307:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os07 --name os07 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9208:9200 -p 9308:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os08 --name os08 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9209:9200 -p 9309:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os09 --name os09 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9210:9200 -p 9310:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os10 --name os10 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9211:9200 -p 9311:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os11 --name os11 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9212:9200 -p 
9312:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os12 --name os12 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9213:9200 -p 9313:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os13 --name os13 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9214:9200 -p 9314:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os14 --name os14 opensearchproject/opensearch:1.2.4 +docker run -it --rm --net curation -p 9215:9200 -p 9315:9300 -e plugins.security.disabled=true -e cluster.initial_master_nodes=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e discovery.seed_hosts=os00,os01,os02,os03,os04,os05,os06,os07,os08,os09,os10,os11,os12,os13,os14,os15 -e node.name=os15 --name os15 opensearchproject/opensearch:1.2.4 + +#docker run -d --net curation -p 9200:9200 -p 9600:9600 -e "discovery.type=single-node" -e "DISABLE_SECURITY_PLUGIN=true" --name opensearch opensearchproject/opensearch:1.2.4 +#docker run --rm -p 8080:8080 --net curation -e QUARKUS_HIBERNATE_SEARCH_ORM_ELASTICSEARCH_HOSTS=os00:9200,os01:9200,os02:9200,os03:9200,os04:9200,os05:9200,os06:9200,os07:9200,os08:9200,os09:9200,os10:9200,os11:9200,os12:9200,os13:9200,os14:9200,os15:9200 -e OKTA_AUTHENTICATION=false -it 
100225593120.dkr.ecr.us-east-1.amazonaws.com/agr_curation:v0.20.0-rc4-138-g134cba0d diff --git a/pom.xml b/pom.xml index 9bc165a88..a1c303fd6 100644 --- a/pom.xml +++ b/pom.xml @@ -358,6 +358,12 @@ reflections 0.10.2 + + + org.jsoup + jsoup + 1.13.1 + diff --git a/src/main/cliapp/Makefile b/src/main/cliapp/Makefile index 1f9dcd360..1d315d0ea 100644 --- a/src/main/cliapp/Makefile +++ b/src/main/cliapp/Makefile @@ -6,6 +6,9 @@ all: run: npm start +runalpha: + export API_URL=https://alpha-curation.alliancegenome.org; make run; unset API_URL + build: npm run-script build diff --git a/src/main/cliapp/package.json b/src/main/cliapp/package.json index a4389c782..28ebe9955 100644 --- a/src/main/cliapp/package.json +++ b/src/main/cliapp/package.json @@ -9,6 +9,7 @@ }, "license": "MIT", "dependencies": { + "@babel/plugin-proposal-private-property-in-object": "^7.21.11", "@fullcalendar/core": "^5.7.2", "@fullcalendar/daygrid": "^5.7.2", "@fullcalendar/interaction": "^5.7.2", @@ -61,6 +62,7 @@ "not op_mini all" ], "devDependencies": { + "@babel/plugin-proposal-private-property-in-object": "^7.21.11", "msw": "^0.48.1" } } diff --git a/src/main/cliapp/src/App.scss b/src/main/cliapp/src/App.scss index 60f4236f6..112ebeb1f 100644 --- a/src/main/cliapp/src/App.scss +++ b/src/main/cliapp/src/App.scss @@ -4,4 +4,4 @@ .p-datatable-scrollable .p-datatable-thead{ z-index : 2 -} +} \ No newline at end of file diff --git a/src/main/cliapp/src/assets/layout/sass/_content.scss b/src/main/cliapp/src/assets/layout/sass/_content.scss index 2c40e1da8..c60707fce 100644 --- a/src/main/cliapp/src/assets/layout/sass/_content.scss +++ b/src/main/cliapp/src/assets/layout/sass/_content.scss @@ -10,3 +10,19 @@ .layout-main { flex: 1 1 auto; } + +.wrap-word { + overflow-wrap : break-word; +} + +.max-w-25rem { + max-width : 25rem; +} + +.max-w-35rem { + max-width : 35rem; +} + +.max-w-40rem { + max-width : 40rem; +} \ No newline at end of file diff --git 
a/src/main/cliapp/src/components/DuplicationAction.js b/src/main/cliapp/src/components/DuplicationAction.js new file mode 100644 index 000000000..ec952b97d --- /dev/null +++ b/src/main/cliapp/src/components/DuplicationAction.js @@ -0,0 +1,4 @@ +import { Button } from "primereact/button"; +export const DuplicationAction = ({ props, handleDuplication, disabled }) => { + return + + ); + } + }; + + const databaseStatusEditor = (props) => { + if (props?.rowData?.alleleDatabaseStatus?.databaseStatus) { + return ( + <> +
+ +
+ + + ) + } else { + return ( + <> +
+ +
+ + + ) + } + }; + + const handleDatabaseStatusOpen = (event, rowData, isInEdit) => { + let _databaseStatusData = {}; + _databaseStatusData["originalDatabaseStatuses"] = [rowData.alleleDatabaseStatus]; + _databaseStatusData["dialog"] = true; + _databaseStatusData["isInEdit"] = isInEdit; + setDatabaseStatusData(() => ({ + ..._databaseStatusData + })); + }; + + const handleDatabaseStatusOpenInEdit = (event, rowProps, isInEdit) => { + const { rows } = rowProps.props; + const { rowIndex } = rowProps; + const index = rowIndex % rows; + let _databaseStatusData = {}; + _databaseStatusData["originalDatabaseStatuses"] = [rowProps.rowData.alleleDatabaseStatus]; + _databaseStatusData["dialog"] = true; + _databaseStatusData["isInEdit"] = isInEdit; + _databaseStatusData["rowIndex"] = index; + _databaseStatusData["mainRowProps"] = rowProps; + setDatabaseStatusData(() => ({ + ..._databaseStatusData + })); + }; + const mutationTypesTemplate = (rowData) => { if (rowData?.alleleMutationTypes) { const mutationTypeSet = new Set(); @@ -1135,6 +1220,14 @@ export const AllelesTable = () => { sortable: isEnabled, filterConfig: FILTER_CONFIGS.alleleGermlineTransmissionStatusFilterConfig, }, + { + field: "alleleDatabaseStatus.databaseStatus.name", + header: "Database Status", + body: databaseStatusTemplate, + editor: (props) => databaseStatusEditor(props), + sortable: isEnabled, + filterConfig: FILTER_CONFIGS.alleleDatabaseStatusFilterConfig, + }, { field: "references.curie", header: "References", @@ -1174,6 +1267,12 @@ export const AllelesTable = () => { filterConfig: FILTER_CONFIGS.relatedNotesFilterConfig, editor: relatedNotesEditor }, + { + field: "dataProvider.sourceOrganization.abbreviation", + header: "Data Provider", + sortable: isEnabled, + filterConfig: FILTER_CONFIGS.alleleDataProviderFilterConfig, + }, { field: "updatedBy.uniqueId", header: "Updated By", @@ -1302,6 +1401,12 @@ export const AllelesTable = () => { errorMessagesMainRow={errorMessages} 
setErrorMessagesMainRow={setErrorMessages} /> + { + const { originalDatabaseStatuses, isInEdit, dialog, rowIndex, mainRowProps } = originalDatabaseStatusData; + const [localDatabaseStatuses, setLocalDatabaseStatuses] = useState(null) ; + const [editingRows, setEditingRows] = useState({}); + const [errorMessages, setErrorMessages] = useState([]); + const booleanTerms = useControlledVocabularyService('generic_boolean_terms'); + const validationService = new ValidationService(); + const tableRef = useRef(null); + const rowsEdited = useRef(0); + const toast_topright = useRef(null); + + const databaseStatusTerms = useControlledVocabularyService('Allele database status vocabulary'); + + const showDialogHandler = () => { + let _localDatabaseStatuses = cloneDatabaseStatuses(originalDatabaseStatuses); + setLocalDatabaseStatuses(_localDatabaseStatuses); + + if(isInEdit){ + let rowsObject = {}; + if(_localDatabaseStatuses) { + _localDatabaseStatuses.forEach((ds) => { + rowsObject[`${ds.dataKey}`] = true; + }); + } + setEditingRows(rowsObject); + }else{ + setEditingRows({}); + } + rowsEdited.current = 0; + }; + + const onRowEditChange = (e) => { + setEditingRows(e.data); + } + + const onRowEditCancel = (event) => { + let _editingRows = { ...editingRows }; + delete _editingRows[event.index]; + setEditingRows(_editingRows); + let _localDatabaseStatuses = [...localDatabaseStatuses];//add new note support + if(originalDatabaseStatuses && originalDatabaseStatuses[event.index]){ + let dataKey = _localDatabaseStatuses[event.index].dataKey; + _localDatabaseStatuses[event.index] = global.structuredClone(originalDatabaseStatuses[event.index]); + _localDatabaseStatuses[event.index].dataKey = dataKey; + setLocalDatabaseStatuses(_localDatabaseStatuses); + } + const errorMessagesCopy = errorMessages; + errorMessagesCopy[event.index] = {}; + setErrorMessages(errorMessagesCopy); + compareChangesInDatabaseStatuses(event.data,event.index); + }; + + const compareChangesInDatabaseStatuses = 
(data, index) => { + if(originalDatabaseStatuses && originalDatabaseStatuses[index]) { + if (data.internal !== originalDatabaseStatuses[index].internal) { + rowsEdited.current++; + } + if ((originalDatabaseStatuses[index].evidence && !data.evidence) || + (!originalDatabaseStatuses[index].evidence && data.evidence) || + (data.evidence && (data.evidence.length !== originalDatabaseStatuses[index].evidence.length)) + ) { + rowsEdited.current++; + } else { + if (data.evidence) { + for (var i = 0; i < data.evidence.length; i++) { + if (data.evidence[i].curie !== originalDatabaseStatuses[index].evidence[i].curie) { + rowsEdited.current++; + } + } + } + } + if ((originalDatabaseStatuses[index].databaseStatus && !data.databaseStatus) || + (!originalDatabaseStatuses[index].databaseStatus && data.databaseStatus) || + (originalDatabaseStatuses[index].databaseStatus && (originalDatabaseStatuses[index].databaseStatus.name !== data.databaseStatus.name)) + ) { + rowsEdited.current++; + } + } + + if (localDatabaseStatuses.length > originalDatabaseStatuses?.length || !originalDatabaseStatuses[0]) { + rowsEdited.current++; + } + }; + + const onRowEditSave = async(event) => { + const result = await validateDatabaseStatus(localDatabaseStatuses[event.index]); + const errorMessagesCopy = [...errorMessages]; + errorMessagesCopy[event.index] = {}; + let _editingRows = { ...editingRows }; + if (result.isError) { + let reported = false; + Object.keys(result.data).forEach((field) => { + let messageObject = { + severity: "error", + message: result.data[field] + }; + errorMessagesCopy[event.index][field] = messageObject; + if(!reported) { + toast_topright.current.show([ + { life: 7000, severity: 'error', summary: 'Update error: ', + detail: 'Could not update AlleleDatabaseStatus [' + localDatabaseStatuses[event.index].id + ']', sticky: false } + ]); + reported = true; + } + }); + } else { + delete _editingRows[event.index]; + compareChangesInDatabaseStatuses(event.data, event.index); + } + 
setErrorMessages(errorMessagesCopy); + let _localDatabaseStatuses = [...localDatabaseStatuses]; + _localDatabaseStatuses[event.index] = event.data; + setEditingRows(_editingRows); + setLocalDatabaseStatuses(_localDatabaseStatuses); + }; + + const hideDialog = () => { + setErrorMessages([]); + setOriginalDatabaseStatusData((originalDatabaseStatusData) => { + return { + ...originalDatabaseStatusData, + dialog: false, + }; + }); + let _localDatabaseStatuses = []; + setLocalDatabaseStatuses(_localDatabaseStatuses); + }; + + const validateDatabaseStatus = async (gts) => { + let _gts = global.structuredClone(gts); + delete _gts.dataKey; + const result = await validationService.validate('alleledatabasestatusslotannotation', _gts); + return result; + }; + + const cloneDatabaseStatuses = (clonableDatabaseStatuses) => { + let _clonableDatabaseStatuses = []; + if (clonableDatabaseStatuses?.length > 0 && clonableDatabaseStatuses[0]) { + _clonableDatabaseStatuses = global.structuredClone(clonableDatabaseStatuses); + if(_clonableDatabaseStatuses) { + let counter = 0 ; + _clonableDatabaseStatuses.forEach((gts) => { + gts.dataKey = counter++; + }); + } + } + return _clonableDatabaseStatuses; + }; + + const saveDataHandler = () => { + setErrorMessages([]); + for (const name of localDatabaseStatuses) { + delete name.dataKey; + } + mainRowProps.rowData.alleleDatabaseStatus = localDatabaseStatuses[0] ? localDatabaseStatuses[0] : null; + let updatedAnnotations = [...mainRowProps.props.value]; + updatedAnnotations[rowIndex].alleleDatabaseStatus = localDatabaseStatuses[0] ? localDatabaseStatuses[0] : null; + + const errorMessagesCopy = global.structuredClone(errorMessagesMainRow); + let messageObject = { + severity: "warn", + message: "Pending Edits!" 
+ }; + errorMessagesCopy[rowIndex] = {}; + errorMessagesCopy[rowIndex]["alleleDatabaseStatus"] = messageObject; + setErrorMessagesMainRow({...errorMessagesCopy}); + + setOriginalDatabaseStatusData((originalDatabaseStatusData) => { + return { + ...originalDatabaseStatusData, + dialog: false, + } + } + ); + }; + + const onDatabaseStatusEditorValueChange = (props, event) => { + let _localDatabaseStatuses = [...localDatabaseStatuses]; + _localDatabaseStatuses[props.rowIndex].databaseStatus = event.value; + }; + + const databaseStatusEditor = (props) => { + return ( + <> + + + + ); + }; + + const databaseStatusTemplate = (rowData) => { + return {rowData.databaseStatus?.name}; + }; + + const internalTemplate = (rowData) => { + return {JSON.stringify(rowData.internal)}; + }; + + const internalEditor = (props) => { + return ( + <> + + + + ); + }; + + const onInternalEditorValueChange = (props, event) => { + let _localDatabaseStatuses = [...localDatabaseStatuses]; + _localDatabaseStatuses[props.rowIndex].internal = event.value.name; + } + + const footerTemplate = () => { + if (!isInEdit) { + return null; + }; + return ( +
+
+ ); + } + + const createNewDatabaseStatusHandler = (event) => { + let cnt = localDatabaseStatuses ? localDatabaseStatuses.length : 0; + const _localDatabaseStatuses = global.structuredClone(localDatabaseStatuses); + _localDatabaseStatuses.push({ + dataKey : cnt, + internal : false + }); + let _editingRows = { ...editingRows, ...{ [`${cnt}`]: true } }; + setEditingRows(_editingRows); + setLocalDatabaseStatuses(_localDatabaseStatuses); + }; + + const handleDeleteDatabaseStatus = (event, props) => { + let _localDatabaseStatuses = global.structuredClone(localDatabaseStatuses); + if(props.dataKey){ + _localDatabaseStatuses.splice(props.dataKey, 1); + }else { + _localDatabaseStatuses.splice(props.rowIndex, 1); + } + setLocalDatabaseStatuses(_localDatabaseStatuses); + rowsEdited.current++; + } + + const deleteAction = (props) => { + return ( +