From 048dff612492350517449ccdbbe51ab887e37396 Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Tue, 19 Sep 2023 13:43:41 -0700 Subject: [PATCH 001/126] working version of existing services --- helm/portal/defaults/gitops.json | 553 +++++++++++++------------- helm/portal/templates/deployment.yaml | 4 +- values.yaml | 79 ++++ 3 files changed, 354 insertions(+), 282 deletions(-) create mode 100644 values.yaml diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index 2f2486e26..af1a3051c 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -1,351 +1,344 @@ { - "subcommons": [ - { - "URL": "https://tb.diseasedatahub.org/", - "name": "TB" - }, - { - "URL": "https://aids.diseasedatahub.org/", - "name": "AIDS" - }, - { - "URL": "https://flu.diseasedatahub.org/", - "name": "FLU" - }, - { - "URL": "https://microbiome.diseasedatahub.org/", - "name": "Microbiome" - } - ], - "gaTrackingId": "UA-119127212-1", + "gaTrackingId": "undefined", "graphql": { "boardCounts": [ + { + "graphql": "_person_count", + "name": "Person", + "plural": "Persons" + }, { "graphql": "_subject_count", "name": "Subject", "plural": "Subjects" - }, - { - "graphql": "_study_count", - "name": "Study", - "plural": "Studies" - }, - { - "graphql": "_summary_lab_result_count", - "name": "Lab record", - "plural": "Lab records" } ], "chartCounts": [ { - "graphql": "_subject_count", - "name": "Subject" + "graphql": "_person_count", + "name": "Person" }, { - "graphql": "_study_count", - "name": "Study" + "graphql": "_subject_count", + "name": "Subject" } ], "projectDetails": "boardCounts" }, "components": { - "appName": "Gen3 Disease Data Hub", + "appName": "Pediatric Cancer Data Commons Portal", "index": { "introduction": { - "heading": "Gen3 Disease Data Hub Datasets", - "text": "The Gen3 Disease Data Hub hosts data related to infectious diseases and aims to make data findable, accessible, interoperable, and reusable (FAIR).", - "link": "/datasets" + "heading": "Pediatric Cancer Data Commons", + "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", + "link": "/submission" }, "buttons": [ { - "name": "TB Environment", - "icon": "data-explore", - "body": "Explore TB data.", - "external_link": "https://tb.diseasedatahub.org" + "name": "Define Data Field", + "icon": "data-field-define", + "body": "The Pediatric Cancer Data Commons defines the data. 
Please study the dictionary before you start browsing.", + "link": "/DD", + "label": "Learn more" }, { - "name": "AIDS Environment", + "name": "Explore Data", "icon": "data-explore", - "body": "Explore AIDS data.", - "external_link": "https://aids.diseasedatahub.org" - }, - { - "name": "Flu Environment", - "icon": "data-explore", - "body": "Explore influenza data.", - "external_link": "https://flu.diseasedatahub.org" + "body": "The Exploration Page gives you insights and a clear overview under selected factors.", + "link": "/explorer", + "label": "Explore data" }, { - "name": "Microbiome Environment", - "icon": "data-explore", - "body": "Explore data from a collection of open-access microbiome-related studies.", - "external_link": "https://microbiome.diseasedatahub.org" + "name": "Access Data", + "icon": "data-access", + "body": "Use our selected tool to filter out the data you need.", + "link": "/query", + "label": "Query data" } ] }, "navigation": { "items": [ { - "icon": "query", - "link": "/datasets", + "icon": "dictionary", + "link": "/DD", "color": "#a2a2a2", - "name": "Dataset Browser" + "name": "Dictionary" }, { "icon": "exploration", "link": "/explorer", "color": "#a2a2a2", - "name": "Eco Explorer" + "name": "Exploration" + }, + { + "icon": "query", + "link": "/query", + "color": "#a2a2a2", + "name": "Query" + }, + { + "icon": "profile", + "link": "/identity", + "color": "#a2a2a2", + "name": "Profile" } ] }, "topBar": { "items": [ { - "link": "https://gen3.org/resources/user/", - "name": "Documentation" + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/pcdc/", + "name": "About PCDC" + }, + { + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/sponsors/", + "name": "Our Sponsors" + }, + { + "icon": "upload", + "link": "/submission", + "name": "Data Submission" } ] }, "login": { - "title": "Gen3 Disease Data Hub", - "subTitle": "Cross Environment Datasets", - "text": "The website combines open access datasets from multiple disciplines to create clean, easy to navigate visualizations for data-driven discovery within the fields of allergy and infectious diseases.", + "title": "Pediatric Cancer Data Commons", + "subTitle": "Connect. Share. Cure.", + "text": "The Pediatric Cancer Data Commons (PCDC) harnesses pediatric cancer clinical data from around the globe into a single combined platform, connecting the data to other sources and making it available to clinicians and researchers everywhere. Headquartered at the University of Chicago, the PCDC team works with international leaders in pediatric cancers to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources. 
The PCDC currently houses the world's largest sets of clinical data for pediatric neuroblastoma and soft tissue sarcoma and is in the process of onboarding additional pediatric cancer disease groups.", "contact": "If you have any questions about access or the registration process, please contact ", - "email": "support@datacommons.io" + "email": "pcdc_root@lists.uchicago.edu" }, "footerLogos": [ { - "src": "/custom/sponsors/gitops-sponsors/gen3.png", + "src": "/src/img/gen3.png", "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons" + "alt": "Gen3 Data Commons", + "height": 40 }, { - "src": "/src/img/createdby.png", - "href": "https://ctds.uchicago.edu/", - "alt": "Center for Translational Data Science at the University of Chicago" + "src": "/src/img/uchicago.png", + "href": "https://www.uchicago.edu/", + "alt": "The University of Chicago", + "height": 40 } ] }, - "requiredCerts": [], - "featureFlags": { - "explorer": true, - "analysis": true - }, - "datasetBrowserConfig": { - "filterSections": [ - { - "title": "Supported Data Resources", - "options": [ - { "text": "TB", "filterType": "singleSelect"}, - { "text": "AIDS", "filterType": "singleSelect"}, - { "text": "Flu", "filterType": "singleSelect"}, - { "text": "Microbiome", "filterType": "singleSelect"} - ] + "explorerConfig": [ + { + "id": 1, + "label": "data", + "charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + } }, - { - "title": "Research Focus", - "options": [ - { "text": "AIDS", "filterType": "singleSelect"}, - { "text": "TB", "filterType": "singleSelect"}, - { "text": "Flu", "filterType": "singleSelect"}, - { "text": "Immune Response", "filterType": "singleSelect"}, - { "text": "Immune Phenotype", "filterType": "singleSelect"}, - { "text": "Allergy", "filterType": "singleSelect"}, - { "text": "Atopy", "filterType": "singleSelect"}, - { "text": "Infection Response", "filterType": "singleSelect"}, - { "text": "Vaccine Response", "filterType": "singleSelect"}, - { "text": "Transplantation", "filterType": "singleSelect"}, - { "text": "Oncology", "filterType": "singleSelect"}, - { "text": "Autoimmune", "filterType": "singleSelect"}, - { "text": "Preterm Birth", "filterType": "singleSelect"} + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular"] + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.invasiveness", + "tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "stagings.irs_group", + "stagings.tnm_finding" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2" + ] + } ] - } - ], - "fieldMapping" : [ - { "field": "link", "name": "View" }, - { "field": "dataset_name", "name": "Study" }, - { "field": 
"supported_data_resource", "name": "Supported Data Resource" }, - { "field": "research_focus", "name": "Research Focus" }, - { "field": "description", "name": "Description of Dataset" } - ], - "filterConfig": { - "tabs": [{ - "title": "Filters", - "fields": ["supported_data_resource", "research_focus"] - }] - } - }, - "dataExplorerConfig": { - "charts": { - "project_id": { - "chartType": "count", - "title": "Projects" - }, - "subject_id": { - "chartType": "count", - "title": "Subjects" - }, - "dataset": { - "chartType": "pie", - "title": "Resources", - "chartRow": 0 }, - "data_format": { - "chartType": "bar", - "title": "Data Format", - "chartRow": 0 - }, - "data_type": { - "chartType": "pie", - "title": "Data Type", - "chartRow": 0 - }, - "experimental_strategies": { - "chartType": "bar", - "title": "Experimental Strategies", - "chartRow": 0 - }, - "species": { - "chartType": "bar", - "title": "Genus species", - "chartRow": 0 - }, - "gender": { - "chartType": "pie", - "title": "Gender", - "chartRow": 1 - }, - "race": { - "chartType": "pie", - "title": "Race", - "chartRow": 1 - }, - "ethnicity": { - "chartType": "pie", - "title": "Ethnicity", - "chartRow": 1 - }, - "biospecimen_anatomic_site": { - "chartType": "pie", - "title": "Biospecimen Anatomic Site", - "chartRow": 1 - } - }, - "fieldMapping" : [ - { "field": "dataset", "name": "Resource" }, - { "field": "studyAccession", "name": "Study" }, - { "field": "phenotype", "name": "Phenotype" }, - { "field": "gender", "name": "Gender" }, - { "field": "ethnicity", "name": "Ethnicity" }, - { "field": "strain", "name": "Strain" }, - { "field": "species", "name": "Genus species" }, - { "field": "submitter_id", "name": "Submitter ID" }, - { "field": "race", "name": "Race" }, - { "field": "hiv_status", "name": "HIV Status" }, - { "field": "study_submitter_id", "name": "Study"}, - { "field": "frstdthd", "name": "Year of Death" }, - { "field": "arthxbase", "name": "ART Use Prior to Baseline"}, - { "field": "bshbvstat", "name": "Baseline HBV Sero-status"}, - { "field": "bshcvstat", "name": "Baseline HCV Sero-status"}, - { "field": "cd4nadir", "name": "CD4 Nadir Prior to HAART"}, - { "field": "status", "name": "Summarized HIV Sero-status"}, - {"field": "project_id", "name": "Project ID"}, - {"field": "frstcncrd", "name": "First Confirmed Cancer Year"}, - {"field": "frstdmd", "name": "First Visit Year with Diabetes"}, - {"field": "frstdmmd", "name": "First Visit Year with All Necessary Components to Determine Diabetes"}, - {"field": "frsthtnd", "name": "First Visit Year with Hypertension"}, - {"field": "frsthtnmd", "name": "First Visit Year with All Necessary Components to Determine Hypertension"}, - {"field": "fcd4lowd", "name": "First Year Seen CD4N < 200 or CD4% < 14"}, - {"field": "fposdate", "name": "First Year Seen Seropositive"}, - {"field": "frstaidd", "name": "First Reported AIDS Year"}, - {"field": "lastafrd", "name": "Last Reported AIDS Free Year"}, - {"field": "lastcond", "name": "Year of Last Study Visit Attended"}, - {"field": "lastcontact", "name": "Last Year of Contact"}, - {"field": "lcd4higd", "name": "Last Year Seen with CD4N >= 200 and CD4% >= 14"}, - {"field": "lnegdate", "name": "Last Year Seen Seronegative"}, - {"field": "amikacin_res_phenotype", "name": "Amikacin Phenotype" }, - {"field": "capreomycin_res_phenotype", "name": "Capreomycin Phenotype" }, - {"field": "isoniazid_res_phenotype", "name": "Isoniazid Phenotype" }, - {"field": "kanamycin_res_phenotype", "name": "Kanamycin Phenotype" }, - {"field": 
"ofloxacin_res_phenotype", "name": "Ofloxacin Phenotype" }, - {"field": "pyrazinamide_res_phenotype", "name": "Pyrazinamide Phenotype" }, - {"field": "rifampicin_res_phenotype", "name": "Rifampicin Phenotype" }, - {"field": "rifampin_res_phenotype", "name": "Rifampin Phenotype" }, - {"field": "streptomycin_res_phenotype", "name": "streptomycin Phenotype" } - ], - "filterConfig": { - "tabs": [{ - "title": "Resource", - "fields": ["dataset", "data_format", "data_type"] - }, - { - "title": "Subject", - "fields": ["ethnicity", "gender", "species", "race"] - }, - { - "title": "Diagnosis", + "buttons": [ + { + "enabled": true, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data accessible to you" + } + ], + "table": { + "enabled": true, "fields": [ - "arthxbase", - "bshbvstat", - "bshcvstat", - "cd4nadir", - "status", - "hiv_status" + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" ] }, - { - "title": "Comorbidity", - "fields": [ - "frstcncrd", - "frstdmd", - "frstdmmd", - "frsthtnd", - "frsthtnmd" - ] - }, { - "title": "HIV History", - "fields": [ - "cd4nadir", - "fcd4lowd", - "fposdate", - "frstaidd", - "lastafrd", - "lastcond", - "lastcontact", - "lcd4higd", - "lnegdate", - "status" - ] + "patientIds": { + "filter": false, + "export": false }, - { - "title": "Drug Resistance", - "fields": [ - "amikacin_res_phenotype", - "capreomycin_res_phenotype", - "isoniazid_res_phenotype", - "kanamycin_res_phenotype", - "ofloxacin_res_phenotype", - "pyrazinamide_res_phenotype", - "rifampicin_res_phenotype", - "rifampin_res_phenotype", - "streptomycin_res_phenotype" - ] + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": false, + "survival": false + } }, - { - "title": "Experiment", - "fields": [ - "experimental_strategies", - "virus_type", - "virus_subtype", - "analyte_type", - "biospecimen_anatomic_site", - "cell_line", - "sample_type", - "composition", - "strain" + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "survival_characteristics.age_at_lkss", + "name": "Age at LKSS" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diam Dim 1" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + { + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": 
"Histology Inpc" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": "molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "project_id", + "name": "Data Release Version" + }, + { + "field": "data_contributor_id", + "name": "Data Contributor" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + } ] - }] + }, + "dataRequests": { + "enabled": true + }, + "getAccessButtonLink": "https://pcdc-gen3-docs.s3.amazonaws.com/%5BDRAFT%5D+PCDC-request_form.docx" } - } + ] } diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index 395fbcf54..8b775001e 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -74,7 +74,7 @@ spec: {{- include "common.datadogEnvVar" . | nindent 12 }} {{- end }} - name: HOSTNAME - value: revproxy-service + value: portal-dev.pedscommons.org # disable npm 7's brand new update notifier to prevent Portal from stuck at starting up # see https://github.com/npm/cli/issues/3163 - name: NPM_CONFIG_UPDATE_NOTIFIER @@ -172,7 +172,7 @@ spec: # - name: BASENAME volumeMounts: - name: "config-volume" - mountPath: "/data-portal/data/config/gitops.json" + mountPath: "/data-portal/data/config/pcdc.json" subPath: "gitops.json" - name: "config-volume" mountPath: "/data-portal/custom/logo/gitops-logo.png" diff --git a/values.yaml b/values.yaml new file mode 100644 index 000000000..4d8cbb333 --- /dev/null +++ b/values.yaml @@ -0,0 +1,79 @@ +global: + dev: true + hostname: localhost + portalApp: pcdc + dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + #revproxyArn: arn:aws:acm:us-east-1:009732147623:certificate/8f00318a-90dd-4601-9059-244274cedd08 +fence: + FENCE_CONFIG: + MOCK_GOOGLE_AUTH: true + OPENID_CONNECT: + google: + mock_default_user: 'test@example.com' + image: + repository: quay.io/pcdc/fence + tag: 1.12.2 + +arborist: + image: + repository: quay.io/pcdc/arborist + tag: 2023.08 + +peregrine: + image: + repository: quay.io/pcdc/peregrine + tag: "helm-test" + +sheepdog: + image: + repository: quay.io/pcdc/sheepdog + tag: "helm-test" + + +aws-es-proxy: +#currently turn off in base image + enabled: true + image: + repository: abutaha/aws-es-proxy + tag: 0.8 + + +portal: + image: + repository: quay.io/pcdc/windmill + tag: "helm-test" + pullPolicy: Always + resources: + requests: + cpu: 0.2 + memory: 500Mi + gitops: + json: "" + +audit: + # -- (bool) Whether to deploy the audit subchart. + enabled: false + +argo-wrapper: + # -- (bool) Whether to deploy the argo-wrapper subchart. + enabled: false + +metadata: + # -- (bool) Whether to deploy the metadata subchart. + enabled: false + +pidgin: + # -- (bool) Whether to deploy the pidgin subchart. 
+ enabled: false + +# guppy: +# enabled: true +# image: +# repository: quay.io/pcdc/guppy +# tag: 1.5.0 + + +# fluentd: +# image: +# repository: fluent/fluentd-kubernetes-daemonset +# tag: v1.15.3-debian-cloudwatch-1.0 \ No newline at end of file From 09a9c2e0f9c528ce996f954f5eb5a70ac3e9fc2d Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Tue, 19 Sep 2023 16:12:34 -0700 Subject: [PATCH 002/126] updated values.yaml --- values.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/values.yaml b/values.yaml index 4d8cbb333..382c91800 100644 --- a/values.yaml +++ b/values.yaml @@ -3,7 +3,6 @@ global: hostname: localhost portalApp: pcdc dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json - #revproxyArn: arn:aws:acm:us-east-1:009732147623:certificate/8f00318a-90dd-4601-9059-244274cedd08 fence: FENCE_CONFIG: MOCK_GOOGLE_AUTH: true @@ -36,12 +35,17 @@ aws-es-proxy: image: repository: abutaha/aws-es-proxy tag: 0.8 - + +# guppy: +# enabled: true +# image: +# repository: quay.io/pcdc/guppy +# tag: 1.5.0 portal: image: repository: quay.io/pcdc/windmill - tag: "helm-test" + tag: 1.19.0 pullPolicy: Always resources: requests: @@ -66,11 +70,7 @@ pidgin: # -- (bool) Whether to deploy the pidgin subchart. enabled: false -# guppy: -# enabled: true -# image: -# repository: quay.io/pcdc/guppy -# tag: 1.5.0 + # fluentd: From 1c3d6539fa7a36e7da41b14ab99ba345a8600493 Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Wed, 20 Sep 2023 11:39:31 -0700 Subject: [PATCH 003/126] clean up versioning still workign copy --- helm/gen3/Chart.yaml | 3 +- helm/gen3/values.yaml | 3 + values.yaml | 141 +++++++++++++++++++++++++++++++----------- 3 files changed, 110 insertions(+), 37 deletions(-) diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 1d6023277..1b5d5469e 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -88,11 +88,10 @@ dependencies: repository: "file://../wts" condition: wts.enabled - - name: elasticsearch version: "0.1.5" repository: "file://../elasticsearch" - condition: global.dev + condition: elasticsearch.enabled - name: postgresql version: 11.9.13 repository: "https://charts.bitnami.com/bitnami" diff --git a/helm/gen3/values.yaml b/helm/gen3/values.yaml index 004691124..4502d672a 100644 --- a/helm/gen3/values.yaml +++ b/helm/gen3/values.yaml @@ -106,6 +106,9 @@ aws-es-proxy: # -- (str) AWS secret access key for aws-es-proxy awsSecretAccessKey: "" +elasticsearch: + enabled: false + fence: # -- (bool) Whether to deploy the fence subchart. enabled: true diff --git a/values.yaml b/values.yaml index 382c91800..a32346a69 100644 --- a/values.yaml +++ b/values.yaml @@ -3,6 +3,35 @@ global: hostname: localhost portalApp: pcdc dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + +arborist: + image: + repository: quay.io/pcdc/arborist + tag: 2023.08 + +ambassador: + # -- (bool) Whether to deploy the ambassador subchart. + enabled: false + +argo-wrapper: + # -- (bool) Whether to deploy the argo-wrapper subchart. + enabled: false + + +audit: + # -- (bool) Whether to deploy the audit subchart. 
+ enabled: false + +aws-es-proxy: + enabled: true + image: + repository: abutaha/aws-es-proxy + tag: 0.8 + +#modify gen3 chart so that elasticsearch automaticcally starts +elasticsearch: + enabled: false + fence: FENCE_CONFIG: MOCK_GOOGLE_AUTH: true @@ -13,34 +42,26 @@ fence: repository: quay.io/pcdc/fence tag: 1.12.2 -arborist: - image: - repository: quay.io/pcdc/arborist - tag: 2023.08 +hatchery: + enabled: false -peregrine: +manifestservice: image: - repository: quay.io/pcdc/peregrine - tag: "helm-test" + repository: quay.io/cdis/manifestservice + tag: 2023.08 -sheepdog: - image: - repository: quay.io/pcdc/sheepdog - tag: "helm-test" +metadata: + # -- (bool) Whether to deploy the metadata subchart. + enabled: false +pidgin: + # -- (bool) Whether to deploy the pidgin subchart. + enabled: false -aws-es-proxy: -#currently turn off in base image - enabled: true +peregrine: image: - repository: abutaha/aws-es-proxy - tag: 0.8 - -# guppy: -# enabled: true -# image: -# repository: quay.io/pcdc/guppy -# tag: 1.5.0 + repository: quay.io/pcdc/peregrine + tag: "helm-test" portal: image: @@ -54,26 +75,76 @@ portal: gitops: json: "" -audit: - # -- (bool) Whether to deploy the audit subchart. - enabled: false +sheepdog: + image: + repository: quay.io/pcdc/sheepdog + tag: "helm-test" -argo-wrapper: - # -- (bool) Whether to deploy the argo-wrapper subchart. - enabled: false +sower: + image: + repository: quay.io/cdis/sower + tag: 2023.08 -metadata: - # -- (bool) Whether to deploy the metadata subchart. - enabled: false +wts: + image: + repository: quay.io/cdis/workspace-token-service + tag: 2023.08 -pidgin: - # -- (bool) Whether to deploy the pidgin subchart. - enabled: false + + +guppy: + enabled: true + image: + repository: quay.io/pcdc/guppy + tag: 1.5.0 + # -- (int) Only relevant if tireAccessLevel is set to "regular". + # The minimum amount of files unauthorized users can filter down to + tierAccessLimit: 1000 + + secrets: + # -- (string) AWS access key. + awsAccessKeyId: "test_key" + # -- (string) AWS secret access key. 
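+    # NOTE: the "test_key" / "test_secrect_key" values here are local-development
+    # placeholders (this file sets global.dev: true and hostname: localhost above).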
+ awsSecretAccessKey: "test_secrect_key" + # -- (list) Elasticsearch index configurations + indices: + - index: dev_case + type: case + - index: dev_file + type: file + + # -- (string) The Elasticsearch configuration index + configIndex: dev_case-array-config + # -- (string) The field used for access control and authorization filters + authFilterField: auth_resource_path + # -- (bool) Whether or not to enable encryption for specified fields + enableEncryptWhitelist: true + # -- (string) A comma-separated list of fields to encrypt + encryptWhitelist: test1 + + + + +# pcdcanalysistools: +# image: +# repository: quay.io/pcdc/pcdcanalysistools +# tag: 1.8.4 # fluentd: # image: # repository: fluent/fluentd-kubernetes-daemonset -# tag: v1.15.3-debian-cloudwatch-1.0 \ No newline at end of file +# tag: v1.15.3-debian-cloudwatch-1.0 + +# amanuensis: +# image: +# repository: quay.io/pcdc/amanuensis +# tag: pcdc_dev_2023-09-06T16_36_49-05_00 + + +# revproxy: +# image: +# repository: quay.io/cdis/nginx +# tag: 1.17.6-ctds-1.0.1 From 51ee3b49a31bb1e81adc0be0cfc6aeeb0d98ca3c Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Thu, 21 Sep 2023 17:14:28 -0700 Subject: [PATCH 004/126] pcdcanalysistools runs in a cluster but needs to be added to nginx --- helm/gen3/Chart.yaml | 4 + helm/gen3/values.yaml | 2 + helm/pcdcanalysistools/.helmignore | 23 +++ helm/pcdcanalysistools/Chart.yaml | 30 +++ .../pcdcanalysistools-secret/confighelper.py | 54 +++++ .../pcdcanalysistools-secret/settings.py | 128 ++++++++++++ helm/pcdcanalysistools/templates/NOTES.txt | 1 + helm/pcdcanalysistools/templates/_helpers.tpl | 62 ++++++ .../templates/deployment.yaml | 184 ++++++++++++++++++ helm/pcdcanalysistools/templates/hpa.yaml | 32 +++ .../pcdcanalysistools/templates/jwt-keys.yaml | 7 + .../templates/pcdcanalysistools-creds.yaml | 8 + .../templates/pcdcanalysistools-secret.yaml | 7 + helm/pcdcanalysistools/templates/service.yaml | 15 ++ .../templates/serviceaccount.yaml | 12 ++ .../templates/tests/test-connection.yaml | 15 ++ helm/pcdcanalysistools/values.yaml | 134 +++++++++++++ 17 files changed, 718 insertions(+) create mode 100644 helm/pcdcanalysistools/.helmignore create mode 100644 helm/pcdcanalysistools/Chart.yaml create mode 100644 helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py create mode 100644 helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py create mode 100644 helm/pcdcanalysistools/templates/NOTES.txt create mode 100644 helm/pcdcanalysistools/templates/_helpers.tpl create mode 100644 helm/pcdcanalysistools/templates/deployment.yaml create mode 100644 helm/pcdcanalysistools/templates/hpa.yaml create mode 100644 helm/pcdcanalysistools/templates/jwt-keys.yaml create mode 100644 helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml create mode 100644 helm/pcdcanalysistools/templates/pcdcanalysistools-secret.yaml create mode 100644 helm/pcdcanalysistools/templates/service.yaml create mode 100644 helm/pcdcanalysistools/templates/serviceaccount.yaml create mode 100644 helm/pcdcanalysistools/templates/tests/test-connection.yaml create mode 100644 helm/pcdcanalysistools/values.yaml diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 1b5d5469e..c85cbc638 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -87,6 +87,10 @@ dependencies: version: "0.1.10" repository: "file://../wts" condition: wts.enabled +- name: pcdcanalysistools + version: "0.1.0" + repository: "file://../pcdcanalysistools" + condition: pcdcanalysistools.enabled - name: elasticsearch version: "0.1.5" 
diff --git a/helm/gen3/values.yaml b/helm/gen3/values.yaml index 4502d672a..74ffc96c6 100644 --- a/helm/gen3/values.yaml +++ b/helm/gen3/values.yaml @@ -364,6 +364,8 @@ ssjdispatcher: # -- (string) Overrides the image tag whose default is the chart appVersion. tag: +pcdcanalysistools: + enabled: true wts: # -- (bool) Whether to deploy the wts subchart. diff --git a/helm/pcdcanalysistools/.helmignore b/helm/pcdcanalysistools/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/helm/pcdcanalysistools/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml new file mode 100644 index 000000000..be33cae3e --- /dev/null +++ b/helm/pcdcanalysistools/Chart.yaml @@ -0,0 +1,30 @@ +apiVersion: v2 +name: pcdcanalysistools +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" + + +dependencies: +- name: common + version: 0.1.7 + repository: file://../common \ No newline at end of file diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py new file mode 100644 index 000000000..ad7b8d697 --- /dev/null +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py @@ -0,0 +1,54 @@ +""" +Originally copied from `cloud-automation/apis_configs/config_helper.py` +(renamed `confighelper.py` so it isn't overwritten by the file that cloud-automation +still mounts for backwards compatibility). 
+ +TODO: once everyone has this independent version of PcdcAnalysisTools, remove `wsgi.py` and +`config_helper.py` here: +https://github.com/uc-cdis/cloud-automation/blob/afb750d/kube/services/PcdcAnalysisTools/PcdcAnalysisTools-deploy.yaml#L166-L177 +""" + +import json +import os + +# +# make it easy to change this for testing +XDG_DATA_HOME = os.getenv("XDG_DATA_HOME", "/usr/share/") + + +def default_search_folders(app_name): + """ + Return the list of folders to search for configuration files + """ + return [ + "%s/cdis/%s" % (XDG_DATA_HOME, app_name), + "/usr/share/cdis/%s" % app_name, + "%s/gen3/%s" % (XDG_DATA_HOME, app_name), + "/usr/share/gen3/%s" % app_name, + "/var/www/%s" % app_name, + "/etc/gen3/%s" % app_name, + ] + + +def find_paths(file_name, app_name, search_folders=None): + """ + Search the given folders for file_name + search_folders defaults to default_search_folders if not specified + return the first path to file_name found + """ + search_folders = search_folders or default_search_folders(app_name) + possible_files = [os.path.join(folder, file_name) for folder in search_folders] + return [path for path in possible_files if os.path.exists(path)] + + +def load_json(file_name, app_name, search_folders=None): + """ + json.load(file_name) after finding file_name in search_folders + + return the loaded json data or None if file not found + """ + actual_files = find_paths(file_name, app_name, search_folders) + if not actual_files: + return None + with open(actual_files[0], "r") as reader: + return json.load(reader) diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py new file mode 100644 index 000000000..743d8e02a --- /dev/null +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -0,0 +1,128 @@ +from PcdcAnalysisTools.api import app, app_init +from os import environ +#import confighelper +from pcdcutils.environment import is_env_enabled + +APP_NAME='PcdcAnalysisTools' +# def load_json(file_name): +# return confighelper.load_json(file_name, APP_NAME) + + +# conf_data = load_json("creds.json") +config = app.config + +config['SERVICE_NAME'] = 'pcdcanalysistools' +config['PRIVATE_KEY_PATH'] = "/var/www/PcdcAnalysisTools/jwt_private_key.pem" + +config["AUTH"] = "https://auth.service.consul:5000/v3/" +config["AUTH_ADMIN_CREDS"] = None +config["INTERNAL_AUTH"] = None + +# ARBORIST deprecated, replaced by ARBORIST_URL +# ARBORIST_URL is initialized in app_init() directly +config["ARBORIST"] = "http://arborist-service/" + +# Signpost: deprecated, replaced by index client. 
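+# The commented-out SIGNPOST block below is kept only for reference; the active
+# INDEX_CLIENT setting further down carries the same host/version/auth values.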
+# config["SIGNPOST"] = { +# "host": environ.get('INDEX_CLIENT_HOST') or "http://indexd-service", +# "version": "v0", +# "auth": ("gdcapi", environ.get( "PGHOST")), +# } +config["INDEX_CLIENT"] = { + "host": environ.get('INDEX_CLIENT_HOST') or "http://indexd-service", + "version": "v0", + "auth": ("gdcapi", environ.get( "PGHOST")), +} +#config["FAKE_AUTH"] = False +config["PSQLGRAPH"] = { + 'host': environ.get( "PGHOST"), + 'user': environ.get( "PGUSER"), + 'password': environ.get( "PGPASSWORD"), + 'database': environ.get( "PGDB"), +} + +config["HMAC_ENCRYPTION_KEY"] = environ.get( "HMAC_ENCRYPTION_KEY") +config["FLASK_SECRET_KEY"] = environ.get( "FLASK_SECRET_KEY") +fence_username = environ.get( "FENCE_DB_USER") +fence_password = environ.get( "FENCE_DB_PASS") +fence_host = environ.get( "FENCE_DB_HOST") +fence_database = environ.get( "FENCE_DB_DBNAME") +config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (fence_username, fence_password, fence_host, fence_database) + +hostname = environ.get("CONF_HOSTNAME") +config['OIDC_ISSUER'] = 'https://%s/user' % hostname + +config["OAUTH2"] = { + "client_id": 'conf_data.get("oauth2_client_id", "{{oauth2_client_id}}")', + "client_secret": 'conf_data.get("oauth2_client_secret", "{{oauth2_client_secret}}")', + "api_base_url": "https://%s/user/" % hostname, + "authorize_url": "https://%s/user/oauth2/authorize" % hostname, + "access_token_url": "https://%s/user/oauth2/token" % hostname, + "refresh_token_url": "https://%s/user/oauth2/token" % hostname, + "client_kwargs": { + "redirect_uri": "https://%s/api/v0/oauth2/authorize" % hostname, + "scope": "openid data user", + }, + # deprecated key values, should be removed after all commons use new oidc + "internal_oauth_provider": "http://fence-service/oauth2/", + "oauth_provider": "https://%s/user/oauth2/" % hostname, + "redirect_uri": "https://%s/api/v0/oauth2/authorize" % hostname, +} + +# trailing slash intentionally omitted +config['GUPPY_API'] = 'http://guppy-service' + +# config['USER_API'] = 'http://fence-service/' +config["USER_API"] = config["OIDC_ISSUER"] # for use by authutils +# use the USER_API URL instead of the public issuer URL to accquire JWT keys +config["FORCE_ISSUER"] = True + +if environ.get('DICTIONARY_URL'): + config['DICTIONARY_URL'] = environ.get('DICTIONARY_URL') +else: + config['PATH_TO_SCHEMA_DIR'] = environ.get('PATH_TO_SCHEMA_DIR') + + +config['SURVIVAL'] = { + 'consortium': ["INSTRuCT", "INRG"], + 'excluded_variables': [ + { + 'label': 'Data Contributor', + 'field': 'data_contributor_id', + }, + { + 'label': 'Study', + 'field': 'studies.study_id', + }, + { + 'label': 'Treatment Arm', + 'field': 'studies.treatment_arm', + } + ], + 'result': { + 'risktable': True, + 'survival': True + } +} + +config['EXTERNAL'] = { + 'commons': [ + { + 'label': 'Genomic Data Commons', + 'value': 'gdc' + }, + { + 'label': 'Gabriella Miller Kids First', + 'value': 'gmkf' + } + ], + "commons_dict": { + "gdc": "TARGET - GDC", + "gmkf": "GMKF" + } +} + + +app_init(app) +application = app +application.debug = (is_env_enabled('GEN3_DEBUG')) diff --git a/helm/pcdcanalysistools/templates/NOTES.txt b/helm/pcdcanalysistools/templates/NOTES.txt new file mode 100644 index 000000000..70b82c54c --- /dev/null +++ b/helm/pcdcanalysistools/templates/NOTES.txt @@ -0,0 +1 @@ +{{ .Chart.Name }} has been deployed \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/_helpers.tpl b/helm/pcdcanalysistools/templates/_helpers.tpl new file mode 100644 index 000000000..e2d7ec287 --- 
/dev/null +++ b/helm/pcdcanalysistools/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "pcdcanalysistools.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "pcdcanalysistools.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "pcdcanalysistools.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "pcdcanalysistools.labels" -}} +helm.sh/chart: {{ include "pcdcanalysistools.chart" . }} +{{ include "pcdcanalysistools.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "pcdcanalysistools.selectorLabels" -}} +app.kubernetes.io/name: {{ include "pcdcanalysistools.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "pcdcanalysistools.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "pcdcanalysistools.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml new file mode 100644 index 000000000..1a7be4941 --- /dev/null +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -0,0 +1,184 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pcdcanalysistools-deployment + labels: + {{- include "pcdcanalysistools.labels" . | nindent 4 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "pcdcanalysistools.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "pcdcanalysistools.selectorLabels" . | nindent 8 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 8 }} + {{- end }} + spec: + volumes: + - name: config-volume + secret: + secretName: "pcdcanalysistools-secret" + - name: pcdcanalysistools-jwt-keys + secret: + secretName: "pcdcanalysistools-jwt-keys" + # - name: config-helper + # secret: + # secretName: "pcdcanalysistools-secret" + # - name: creds-volume + # secret: + # secretName: "sheepdog-creds" + + + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "pcdcanalysistools.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: CONF_HOSTNAME + value: {{ .Values.global.hostname }} + - name: FENCE_DB_USER + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: username + optional: false + - name: FENCE_DB_PASS + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: password + optional: false + - name: FENCE_DB_HOST + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: host + optional: false + - name: FENCE_DB_DBNAME + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: database + optional: false + - name: DICTIONARY_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: dictionary_url + - name: SIGNPOST_HOST + valueFrom: + configMapKeyRef: + name: manifest-global + key: indexd_url + optional: true + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: sheepdog-dbcreds + key: password + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: sheepdog-dbcreds + key: database + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: sheepdog-dbcreds + key: username + optional: false + - name: PGHOST + valueFrom: + secretKeyRef: + name: sheepdog-dbcreds + key: host + optional: false + - name: INDEX_CLIENT_HOST + valueFrom: + configMapKeyRef: + name: manifest-global + key: indexd_url + optional: true + - name: FENCE_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: fence_url + optional: true + - name: ARBORIST_URL + value: http://arborist-service + - name: AUTH_NAMESPACE + value: default + - name: REQUESTS_CA_BUNDLE + value: /etc/ssl/certs/ca-certificates.crt + - name: GEN3_DEBUG + value: "False" + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/PcdcAnalysisTools/wsgi.py" + subPath: "settings.py" + - name: "pcdcanalysistools-jwt-keys" + readOnly: true + mountPath: "/var/www/PcdcAnalysisTools/jwt_private_key.pem" + subPath: "jwt_private_key.pem" + # - name: "creds-volume" + # readOnly: true + # mountPath: "/var/www/PcdcAnalysisTools/creds.json" + # subPath: creds.json + # - name: "config-helper" + # readOnly: true + # mountPath: "/var/www/PcdcAnalysisTools/config_helper.py" + # subPath: confighelper.py + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: /_status?timeout=20 + port: 80 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /_status?timeout=2 + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/hpa.yaml b/helm/pcdcanalysistools/templates/hpa.yaml new file mode 100644 index 000000000..ddc5e5225 --- /dev/null +++ b/helm/pcdcanalysistools/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "pcdcanalysistools.fullname" . }} + labels: + {{- include "pcdcanalysistools.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "pcdcanalysistools.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/pcdcanalysistools/templates/jwt-keys.yaml b/helm/pcdcanalysistools/templates/jwt-keys.yaml new file mode 100644 index 000000000..779761090 --- /dev/null +++ b/helm/pcdcanalysistools/templates/jwt-keys.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: pcdcanalysistools-jwt-keys +type: Opaque +data: + jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml b/helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml new file mode 100644 index 000000000..6d9b6a53a --- /dev/null +++ b/helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: pcdcanalysistools-creds +type: Opaque +stringData: + creds.json: |- + "" \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/pcdcanalysistools-secret.yaml b/helm/pcdcanalysistools/templates/pcdcanalysistools-secret.yaml new file mode 100644 index 000000000..be51e06c0 --- /dev/null +++ b/helm/pcdcanalysistools/templates/pcdcanalysistools-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: pcdcanalysistools-secret +type: Opaque +data: +{{ (.Files.Glob "pcdcanalysistools-secret/*").AsSecrets | indent 2 }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/service.yaml b/helm/pcdcanalysistools/templates/service.yaml new file mode 100644 index 000000000..5ba7ae336 --- /dev/null +++ b/helm/pcdcanalysistools/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "pcdcanalysistools.fullname" . }} + labels: + {{- include "pcdcanalysistools.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "pcdcanalysistools.selectorLabels" . | nindent 4 }} diff --git a/helm/pcdcanalysistools/templates/serviceaccount.yaml b/helm/pcdcanalysistools/templates/serviceaccount.yaml new file mode 100644 index 000000000..cf8465e5b --- /dev/null +++ b/helm/pcdcanalysistools/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "pcdcanalysistools.serviceAccountName" . }} + labels: + {{- include "pcdcanalysistools.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/pcdcanalysistools/templates/tests/test-connection.yaml b/helm/pcdcanalysistools/templates/tests/test-connection.yaml new file mode 100644 index 000000000..12364747b --- /dev/null +++ b/helm/pcdcanalysistools/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "pcdcanalysistools.fullname" . }}-test-connection" + labels: + {{- include "pcdcanalysistools.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "pcdcanalysistools.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/pcdcanalysistools/values.yaml b/helm/pcdcanalysistools/values.yaml new file mode 100644 index 000000000..7bd08e26a --- /dev/null +++ b/helm/pcdcanalysistools/values.yaml @@ -0,0 +1,134 @@ +# Default values for pcdcanalysistools. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # -- (map) AWS configuration + aws: + # -- (bool) Set to true if deploying to AWS. Controls ingress annotations. + enabled: false + # -- (string) Credentials for AWS stuff. + awsAccessKeyId: + # -- (string) Credentials for AWS stuff. + awsSecretAccessKey: + # -- (bool) Whether the deployment is for development purposes. + dev: true + # -- (map) Postgres database configuration. + postgres: + # -- (bool) Whether the database should be created. + dbCreate: true + # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres + master: + # -- (string) hostname of postgres server + host: + # -- (string) username of superuser in postgres. This is used to create or restore databases + username: postgres + # -- (string) password for superuser in postgres. This is used to create or restore databases + password: + # -- (string) Port for Postgres. + port: "5432" + +replicaCount: 1 + +# -- (string) URL for the arborist service +arboristUrl: http://arborist-service +authNamespace: default + +image: + repository: quay.io/pcdc/pcdcanalysistools + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: 1.8.4 + +# -- (list) Docker image pull secrets. +imagePullSecrets: [] + +# -- (string) Override the name of the chart. +nameOverride: "" + +# -- (string) Override the full name of the deployment. +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +# -- (map) Annotations to add to the pod +podAnnotations: {} + +# -- (map) Security context to apply to the pod +podSecurityContext: {} + # fsGroup: 2000 + +# -- (map) Security context to apply to the container +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. 
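+  # (the Service forwards this port to the container's named "http" port, which
+  # the deployment exposes on 80)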
+ port: 80 + + + +resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of CPU requested + cpu: 0.1 + # -- (string) The amount of memory requested + memory: 12Mi + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of CPU the container can use + cpu: 1.0 + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +volumes: +- name: config-volume + secret: + secretName: "pcdcanalysistools-secret" + +# -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl +selectorLabels: + + +# ingress: +# enabled: false +# className: "" +# annotations: {} +# # kubernetes.io/ingress.class: nginx +# # kubernetes.io/tls-acme: "true" +# hosts: +# - host: chart-example.local +# paths: +# - path: / +# pathType: ImplementationSpecific +# tls: [] +# # - secretName: chart-example-tls +# # hosts: +# # - chart-example.local \ No newline at end of file From 945e45b519216960510967deda19177885883aa9 Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Thu, 28 Sep 2023 10:30:38 -0700 Subject: [PATCH 005/126] added amanuensis and pcdcanalysistools services --- helm/amanuensis/.helmignore | 23 + helm/amanuensis/Chart.yaml | 24 + .../amanuensis_google_app_creds_secret.json | 0 ...manuensis_google_storage_creds_secret.json | 0 .../amanuensis-secret/amanuensis_settings.py | 79 +++ .../amanuensis-secret/config_helper.py | 368 ++++++++++++ helm/amanuensis/logo/logo.svg | 1 + helm/amanuensis/templates/NOTES.txt | 2 + helm/amanuensis/templates/_helpers.tpl | 62 ++ .../templates/amanuensis-config.yaml | 11 + .../templates/amanuensis-creds.yaml | 18 + .../templates/amanuensis-logo-config.yaml | 6 + .../templates/amanuensis-secret.yaml | 23 + helm/amanuensis/templates/db-init.yaml | 6 + helm/amanuensis/templates/deployment.yaml | 89 +++ helm/amanuensis/templates/hpa.yaml | 32 + helm/amanuensis/templates/jwt-keys.yaml | 7 + helm/amanuensis/templates/service.yaml | 15 + helm/amanuensis/templates/serviceaccount.yaml | 12 + .../templates/tests/test-connection.yaml | 15 + helm/amanuensis/values.yaml | 561 ++++++++++++++++++ helm/gen3/Chart.yaml | 4 + helm/gen3/values.yaml | 5 + helm/pcdcanalysistools/templates/service.yaml | 2 +- .../gen3.nginx.conf/amanuensis-service.conf | 24 + .../pcdcanalysistools-service.conf | 26 + values.yaml | 24 +- 27 files changed, 1429 insertions(+), 10 deletions(-) create mode 100644 helm/amanuensis/.helmignore create mode 100644 helm/amanuensis/Chart.yaml create mode 100644 helm/amanuensis/amanuensis-google-creds/amanuensis_google_app_creds_secret.json create mode 100644 helm/amanuensis/amanuensis-google-creds/amanuensis_google_storage_creds_secret.json create mode 100644 helm/amanuensis/amanuensis-secret/amanuensis_settings.py create mode 100644 helm/amanuensis/amanuensis-secret/config_helper.py create mode 100644 helm/amanuensis/logo/logo.svg create mode 100644 helm/amanuensis/templates/NOTES.txt create mode 100644 helm/amanuensis/templates/_helpers.tpl create mode 100644 helm/amanuensis/templates/amanuensis-config.yaml create mode 100644 helm/amanuensis/templates/amanuensis-creds.yaml create mode 100644 helm/amanuensis/templates/amanuensis-logo-config.yaml create mode 100644 
helm/amanuensis/templates/amanuensis-secret.yaml create mode 100644 helm/amanuensis/templates/db-init.yaml create mode 100644 helm/amanuensis/templates/deployment.yaml create mode 100644 helm/amanuensis/templates/hpa.yaml create mode 100644 helm/amanuensis/templates/jwt-keys.yaml create mode 100644 helm/amanuensis/templates/service.yaml create mode 100644 helm/amanuensis/templates/serviceaccount.yaml create mode 100644 helm/amanuensis/templates/tests/test-connection.yaml create mode 100644 helm/amanuensis/values.yaml create mode 100644 helm/revproxy/gen3.nginx.conf/amanuensis-service.conf create mode 100644 helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf diff --git a/helm/amanuensis/.helmignore b/helm/amanuensis/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/helm/amanuensis/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml new file mode 100644 index 000000000..e9d67f8ab --- /dev/null +++ b/helm/amanuensis/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: amanuensis +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/helm/amanuensis/amanuensis-google-creds/amanuensis_google_app_creds_secret.json b/helm/amanuensis/amanuensis-google-creds/amanuensis_google_app_creds_secret.json new file mode 100644 index 000000000..e69de29bb diff --git a/helm/amanuensis/amanuensis-google-creds/amanuensis_google_storage_creds_secret.json b/helm/amanuensis/amanuensis-google-creds/amanuensis_google_storage_creds_secret.json new file mode 100644 index 000000000..e69de29bb diff --git a/helm/amanuensis/amanuensis-secret/amanuensis_settings.py b/helm/amanuensis/amanuensis-secret/amanuensis_settings.py new file mode 100644 index 000000000..8522a7521 --- /dev/null +++ b/helm/amanuensis/amanuensis-secret/amanuensis_settings.py @@ -0,0 +1,79 @@ +import os +import json +from boto.s3.connection import OrdinaryCallingFormat + + +DB = "postgresql://test:test@localhost:5432/amanuensis" + +MOCK_AUTH = False +MOCK_STORAGE = False + +SERVER_NAME = "http://localhost/user" +BASE_URL = SERVER_NAME +APPLICATION_ROOT = "/user" + +ROOT_DIR = "/amanuensis" + +# If using multi-tenant setup, configure this to the base URL for the provider +# amanuensis (i.e. ``BASE_URL`` in the provider amanuensis config). +# OIDC_ISSUER = 'http://localhost:8080/user + + +HMAC_ENCRYPTION_KEY = "" + + + +""" +If the api is behind firewall that need to set http proxy: + HTTP_PROXY = {'host': 'cloud-proxy', 'port': 3128} +""" +HTTP_PROXY = None +STORAGES = ["/cleversafe"] + + + + +SESSION_COOKIE_SECURE = False +ENABLE_CSRF_PROTECTION = True + +INDEXD = "/index" + +INDEXD_AUTH = ("gdcapi", "") + +ARBORIST = "/rbac" + +AWS_CREDENTIALS = { + "CRED1": {"aws_access_key_id": "", "aws_secret_access_key": ""}, + "CRED2": {"aws_access_key_id": "", "aws_secret_access_key": ""}, +} + +ASSUMED_ROLES = {"arn:aws:iam::role1": "CRED1"} + +DATA_UPLOAD_BUCKET = "bucket1" + +S3_BUCKETS = { + "bucket1": {"cred": "CRED1"}, + "bucket2": {"cred": "CRED2"}, + "bucket3": {"cred": "CRED1", "role-arn": "arn:aws:iam::role1"}, +} + + +APP_NAME = "" + + + +# dir_path = "/secrets" +# fence_creds = os.path.join(dir_path, "fence_credentials.json") + + +# SUPPORT_EMAIL_FOR_ERRORS = None +# dbGaP = {} +# if os.path.exists(fence_creds): +# with open(fence_creds, "r") as f: +# data = json.load(f) +# AWS_CREDENTIALS = data["AWS_CREDENTIALS"] +# S3_BUCKETS = data["S3_BUCKETS"] +# OIDC_ISSUER = data["OIDC_ISSUER"] +# APP_NAME = data["APP_NAME"] +# HTTP_PROXY = data["HTTP_PROXY"] +# dbGaP = data["dbGaP"] diff --git a/helm/amanuensis/amanuensis-secret/config_helper.py b/helm/amanuensis/amanuensis-secret/config_helper.py new file mode 100644 index 000000000..081ef64d2 --- /dev/null +++ b/helm/amanuensis/amanuensis-secret/config_helper.py @@ -0,0 +1,368 @@ +import json +import os +import copy +import argparse +import re +import types + +# +# make it easy to change this for testing +XDG_DATA_HOME = os.getenv("XDG_DATA_HOME", "/usr/share/") + + +def default_search_folders(app_name): + """ + Return the list of folders to search for configuration files + """ + return [ + "%s/cdis/%s" % (XDG_DATA_HOME, app_name), + "/usr/share/cdis/%s" % app_name, + "%s/gen3/%s" % (XDG_DATA_HOME, app_name), + "/usr/share/gen3/%s" % app_name, + "/var/www/%s" % app_name, + "/etc/gen3/%s" % app_name, + ] + + +def find_paths(file_name, app_name, search_folders=None): + """ + Search the given folders for file_name + search_folders defaults to default_search_folders if not specified + return the first path to file_name found + """ + search_folders = search_folders or default_search_folders(app_name) + 
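+    # Check each candidate folder in order and keep only the paths that actually
+    # exist on disk.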
possible_files = [os.path.join(folder, file_name) for folder in search_folders] + return [path for path in possible_files if os.path.exists(path)] + + +def load_json(file_name, app_name, search_folders=None): + """ + json.load(file_name) after finding file_name in search_folders + + return the loaded json data or None if file not found + """ + actual_files = find_paths(file_name, app_name, search_folders) + if not actual_files: + return None + with open(actual_files[0], "r") as reader: + return json.load(reader) + + +def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): + creds_file = open(creds_file_path, "r") + creds = json.load(creds_file) + creds_file.close() + + # get secret values from creds.json file + db_host = _get_nested_value(creds, "db_host") + db_username = _get_nested_value(creds, "db_username") + db_password = _get_nested_value(creds, "db_password") + db_database = _get_nested_value(creds, "db_database") + hostname = _get_nested_value(creds, "hostname") + data_delivery_bucket = _get_nested_value(creds, "data_delivery_bucket") + data_delivery_bucket_aws_key_id = _get_nested_value(creds, "data_delivery_bucket_aws_key_id") + data_delivery_bucket_aws_access_key = _get_nested_value(creds, "data_delivery_bucket_aws_access_key") + + db_path = "postgresql://{}:{}@{}:5432/{}".format( + db_username, db_password, db_host, db_database + ) + + config_file = open(config_file_path, "r").read() + + print(" DB injected with value(s) from creds.json") + config_file = _replace(config_file, "DB", db_path) + + print(" BASE_URL injected with value(s) from creds.json") + config_file = _replace(config_file, "BASE_URL", "https://{}/amanuensis".format(hostname)) + + print(" HOSTNAME injected with value(s) from creds.json") + config_file = _replace(config_file, "HOSTNAME", "{}".format(hostname)) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id", data_delivery_bucket_aws_key_id + ) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key", data_delivery_bucket_aws_access_key + ) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET", data_delivery_bucket, key_only=True + ) + + # print(" ENCRYPTION_KEY injected with value(s) from creds.json") + # config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) + + + open(config_file_path, "w+").write(config_file) + + +def set_prod_defaults_amanuensis(config_file_path): + config_file = open(config_file_path, "r").read() + + print(" INDEXD set as http://indexd-service/") + config_file = _replace(config_file, "INDEXD", "http://indexd-service/") + + print(" ARBORIST set as http://arborist-service/") + config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") + + print(" HTTP_PROXY/host set as cloud-proxy.internal.io") + config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") + + print(" HTTP_PROXY/port set as 3128") + config_file = _replace(config_file, "HTTP_PROXY/port", 3128) + + print(" DEBUG set to false") + config_file = _replace(config_file, "DEBUG", False) + + print(" MOCK_AUTH set to false") + config_file = _replace(config_file, "MOCK_AUTH", False) 
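+    # remaining overrides: disable mock Google auth, allow OIDC over internal HTTP, and enforce secure cookies plus CSRF protection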
+ + print(" MOCK_GOOGLE_AUTH set to false") + config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) + + print(" AUTHLIB_INSECURE_TRANSPORT set to true") + config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) + + print(" SESSION_COOKIE_SECURE set to true") + config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) + + print(" ENABLE_CSRF_PROTECTION set to true") + config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) + + open(config_file_path, "w+").write(config_file) + + +def inject_other_files_into_fence_config(other_files, config_file_path): + additional_cfgs = _get_all_additional_configs(other_files) + + config_file = open(config_file_path, "r").read() + + for key, value in additional_cfgs.iteritems(): + print(" {} set to {}".format(key, value)) + config_file = _nested_replace(config_file, key, value) + + open(config_file_path, "w+").write(config_file) + + +def _get_all_additional_configs(other_files): + """ + Attempt to parse given list of files and extract configuration variables and values + """ + additional_configs = dict() + for file_path in other_files: + try: + file_ext = file_path.strip().split(".")[-1] + if file_ext == "json": + json_file = open(file_path, "r") + configs = json.load(json_file) + json_file.close() + elif file_ext == "py": + configs = from_pyfile(file_path) + else: + print( + "Cannot load config vars from a file with extention: {}".format( + file_ext + ) + ) + except Exception as exc: + # if there's any issue reading the file, exit + print( + "Error reading {}. Cannot get configuration. Skipping this file. " + "Details: {}".format(other_files, str(exc)) + ) + continue + + if configs: + additional_configs.update(configs) + + return additional_configs + + +def _nested_replace(config_file, key, value, replacement_path=None): + replacement_path = replacement_path or key + try: + for inner_key, inner_value in value.iteritems(): + temp_path = replacement_path + temp_path = temp_path + "/" + inner_key + config_file = _nested_replace( + config_file, inner_key, inner_value, temp_path + ) + except AttributeError: + # not a dict so replace + if value is not None: + config_file = _replace(config_file, replacement_path, value) + + return config_file + + +def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0, key_only=False): + """ + Replace a nested value in a YAML file string with the given value without + losing comments. Uses a regex to do the replacement. + + Args: + yaml_config (str): a string representing a full configuration file + path_to_key (str): nested/path/to/key. The value of this key will be + replaced + replacement_value (str): Replacement value for the key from + path_to_key + """ + nested_path_to_replace = path_to_key.split("/") + + # our regex looks for a specific number of spaces to ensure correct + # level of nesting. 
It matches to the end of the line + search_string = ( + " " * nested_level + ".*" + nested_path_to_replace[0] + "(')?(\")?:.*\n" + ) + matches = re.search(search_string, yaml_config[start:]) + + # early return if we haven't found anything + if not matches: + return yaml_config + + # if we're on the last item in the path, we need to get the value and + # replace it in the original file + if len(nested_path_to_replace) == 1: + # replace the current key:value with the new replacement value + match_start = start + matches.start(0) + len(" " * nested_level) + match_end = start + matches.end(0) + if not key_only: + yaml_config = ( + yaml_config[:match_start] + + "{}: {}\n".format( + nested_path_to_replace[0], + _get_yaml_replacement_value(replacement_value, nested_level), + ) + + yaml_config[match_end:] + ) + else: + yaml_config = ( + yaml_config[:match_start] + + "{}:\n".format( + _get_yaml_replacement_value(replacement_value, nested_level), + ) + + yaml_config[match_end:] + ) + + return yaml_config + + # set new start point to past current match and move on to next match + start = matches.end(0) + nested_level += 1 + del nested_path_to_replace[0] + + return _replace( + yaml_config, + "/".join(nested_path_to_replace), + replacement_value, + start, + nested_level, + key_only=key_only, + ) + + +def from_pyfile(filename, silent=False): + """ + Modeled after flask's ability to load in python files: + https://github.com/pallets/flask/blob/master/flask/config.py + + Some alterations were made but logic is essentially the same + """ + filename = os.path.abspath(filename) + d = types.ModuleType("config") + d.__file__ = filename + try: + with open(filename, mode="rb") as config_file: + exec(compile(config_file.read(), filename, "exec"), d.__dict__) + except IOError as e: + print("Unable to load configuration file ({})".format(e.strerror)) + if silent: + return False + raise + return _from_object(d) + + +def _from_object(obj): + configs = {} + for key in dir(obj): + if key.isupper(): + configs[key] = getattr(obj, key) + return configs + + +def _get_yaml_replacement_value(value, nested_level=0): + if isinstance(value, str): + return "'" + value + "'" + elif isinstance(value, bool): + return str(value).lower() + elif isinstance(value, list) or isinstance(value, set): + output = "" + for item in value: + # spaces for nested level then spaces and hyphen for each list item + output += ( + "\n" + + " " * nested_level + + " - " + + _get_yaml_replacement_value(item) + + "" + ) + return output + else: + return value + + +def _get_nested_value(dictionary, nested_path): + """ + Return a value from a dictionary given a path-like nesting of keys. + + Will default to an empty string if value cannot be found. 
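+    Example: _get_nested_value({"db": {"host": "pg"}}, "db/host") returns "pg",
+    while a path to a missing key such as "db/port" returns "".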
+ + Args: + dictionary (dict): a dictionary + nested_path (str): nested/path/to/key + + Returns: + ?: Value from dict + """ + replacement_value_path = nested_path.split("/") + replacement_value = copy.deepcopy(dictionary) + + for item in replacement_value_path: + replacement_value = replacement_value.get(item, {}) + + if replacement_value == {}: + replacement_value = "" + + return replacement_value + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", + "--creds_file_to_inject", + default="creds.json", + help="creds file to inject into the configuration yaml", + ) + parser.add_argument( + "--other_files_to_inject", + nargs="+", + help="amanuensis_credentials.json, local_settings.py, amanuensis_settings.py file(s) to " + "inject into the configuration yaml", + ) + parser.add_argument( + "-c", "--config_file", default="config.yaml", help="configuration yaml" + ) + args = parser.parse_args() + + inject_creds_into_amanuensis_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults_amanuensis(args.config_file) + + if args.other_files_to_inject: + inject_other_files_into_fence_config( + args.other_files_to_inject, args.config_file + ) diff --git a/helm/amanuensis/logo/logo.svg b/helm/amanuensis/logo/logo.svg new file mode 100644 index 000000000..721ee23fa --- /dev/null +++ b/helm/amanuensis/logo/logo.svg @@ -0,0 +1 @@ +fresh diff --git a/helm/amanuensis/templates/NOTES.txt b/helm/amanuensis/templates/NOTES.txt new file mode 100644 index 000000000..772641bfe --- /dev/null +++ b/helm/amanuensis/templates/NOTES.txt @@ -0,0 +1,2 @@ +{{ .Chart.Name }} has been deployed successfully. + diff --git a/helm/amanuensis/templates/_helpers.tpl b/helm/amanuensis/templates/_helpers.tpl new file mode 100644 index 000000000..b46ed2a9f --- /dev/null +++ b/helm/amanuensis/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "amanuensis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "amanuensis.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "amanuensis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "amanuensis.labels" -}} +helm.sh/chart: {{ include "amanuensis.chart" . }} +{{ include "amanuensis.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "amanuensis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "amanuensis.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "amanuensis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "amanuensis.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/amanuensis/templates/amanuensis-config.yaml b/helm/amanuensis/templates/amanuensis-config.yaml new file mode 100644 index 000000000..6939d1d72 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-config.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: amanuensis-config +stringData: + amanuensis-config.yaml: | + BASE_URL: https://{{ .Values.global.hostname }}/amanuensis + DB: postgresql://{{ .Files.Get "secrets/amanuensis-dbcreds/username" | default "" }}:{{ .Files.Get "secrets/amanuensis-dbcreds/password" | default "" }}@{{ .Files.Get "secrets/amanuensis-dbcreds/host" | default "" }}:5432/{{ .Files.Get "secrets/amanuensis-dbcreds/database" | default "" }} + {{- with .Values.AMANUENSIS_CONFIG }} + {{- toYaml . | nindent 4 }} + {{ end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-creds.yaml b/helm/amanuensis/templates/amanuensis-creds.yaml new file mode 100644 index 000000000..c904835fe --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-creds.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: amanuensis-creds +type: Opaque +stringData: + creds.json: |- + { + "db_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" $.Chart.Name "context" $) }}", + "db_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" $.Chart.Name "context" $) }}", + "db_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) }}", + "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" $.Chart.Name "context" $)}}", + "hostname": "{{ .Values.global.hostname }}", + "indexd_password": "", + "google_client_secret": "YOUR.GOOGLE.SECRET", + "google_client_id": "YOUR.GOOGLE.CLIENT", + "hmac_key": "" + } diff --git a/helm/amanuensis/templates/amanuensis-logo-config.yaml b/helm/amanuensis/templates/amanuensis-logo-config.yaml new file mode 100644 index 000000000..6131c0fca --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-logo-config.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: amanuensis-logo-config +data: + logo.svg: {{ .Values.logo | default ((.Files.Get "logo/logo.svg")) }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-secret.yaml b/helm/amanuensis/templates/amanuensis-secret.yaml new file mode 100644 index 000000000..4adb27736 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-secret.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Secret +metadata: + name: amanuensis-secret +type: Opaque +data: +{{ (.Files.Glob "amanuensis-secret/*").AsSecrets | indent 2 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: amanuensis-google-app-creds-secret +type: Opaque +data: +{{ (.Files.Glob "amanuensis-google-creds/*").AsSecrets | indent 2 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: amanuensis-google-storage-creds-secret +type: Opaque +data: +{{ (.Files.Glob "amanuensis-google-creds/*").AsSecrets | indent 2 }} diff --git a/helm/amanuensis/templates/db-init.yaml b/helm/amanuensis/templates/db-init.yaml new file mode 100644 index 000000000..abbefb6eb --- 
/dev/null +++ b/helm/amanuensis/templates/db-init.yaml @@ -0,0 +1,6 @@ +{{ include "common.db_setup_job" . }} +--- +{{ include "common.db-secret" . }} +--- +{{ include "common.db_setup_sa" . }} +--- diff --git a/helm/amanuensis/templates/deployment.yaml b/helm/amanuensis/templates/deployment.yaml new file mode 100644 index 000000000..33761c5ec --- /dev/null +++ b/helm/amanuensis/templates/deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: amanuensis-deployment + labels: + {{- include "amanuensis.labels" . | nindent 4 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "amanuensis.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "amanuensis.selectorLabels" . | nindent 8 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 8 }} + {{- end }} + spec: + volumes: + {{- toYaml .Values.volumes | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "amanuensis.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /_status + port: 80 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 60 + readinessProbe: + httpGet: + path: /_status + port: 80 + resources: + {{- toYaml .Values.resources | nindent 12 }} + command: ["/bin/bash"] + args: + - "-c" + - | + echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml" + python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml + #bash /amanuensis/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then /dockerrun.sh; fi + bash /dockerrun.sh + env: + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogEnvVar" . | nindent 12 }} + {{- end }} + {{- toYaml .Values.env | nindent 12 }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 12 }} + + + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/amanuensis/templates/hpa.yaml b/helm/amanuensis/templates/hpa.yaml new file mode 100644 index 000000000..9181d33b6 --- /dev/null +++ b/helm/amanuensis/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "amanuensis.fullname" . }} + labels: + {{- include "amanuensis.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "amanuensis.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/amanuensis/templates/jwt-keys.yaml b/helm/amanuensis/templates/jwt-keys.yaml new file mode 100644 index 000000000..d902c85dc --- /dev/null +++ b/helm/amanuensis/templates/jwt-keys.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: amanuensis-jwt-keys +type: Opaque +data: + jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . }} \ No newline at end of file diff --git a/helm/amanuensis/templates/service.yaml b/helm/amanuensis/templates/service.yaml new file mode 100644 index 000000000..781b31a52 --- /dev/null +++ b/helm/amanuensis/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "amanuensis-service" + labels: + {{- include "amanuensis.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "amanuensis.selectorLabels" . | nindent 4 }} diff --git a/helm/amanuensis/templates/serviceaccount.yaml b/helm/amanuensis/templates/serviceaccount.yaml new file mode 100644 index 000000000..7e6f0a46e --- /dev/null +++ b/helm/amanuensis/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "amanuensis.serviceAccountName" . }} + labels: + {{- include "amanuensis.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/amanuensis/templates/tests/test-connection.yaml b/helm/amanuensis/templates/tests/test-connection.yaml new file mode 100644 index 000000000..a54ae8e77 --- /dev/null +++ b/helm/amanuensis/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "amanuensis.fullname" . }}-test-connection" + labels: + {{- include "amanuensis.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "amanuensis.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml new file mode 100644 index 000000000..dd99265c5 --- /dev/null +++ b/helm/amanuensis/values.yaml @@ -0,0 +1,561 @@ +# Default values for amanuensis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # -- (bool) Whether the deployment is for development purposes. + dev: true + # -- (map) Postgres database configuration. + postgres: + # -- (bool) Whether the database should be created. + dbCreate: true + # -- (map) Master credentials to postgres. 
This is going to be the default postgres server being used for each service, unless each service specifies their own postgres + master: + # -- (string) hostname of postgres server + host: + # -- (string) username of superuser in postgres. This is used to create or restore databases + username: postgres + # -- (string) password for superuser in postgres. This is used to create or restore databases + password: + # -- (string) Port for Postgres. + port: "5432" + # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. + environment: default + # -- (string) Hostname for the deployment. + hostname: localhost + # -- (string) ARN of the reverse proxy certificate. + revproxyArn: arn:aws:acm:us-east-1:123456:certificate + # -- (string) URL of the data dictionary. + dictionaryUrl: https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json + # -- (string) Portal application name. + portalApp: gitops + # -- (string) S3 bucket name for Kubernetes manifest files. + kubeBucket: kube-gen3 + # -- (string) S3 bucket name for log files. + logsBucket: logs-gen3 + # -- (bool) Whether to sync data from dbGaP. + syncFromDbgap: false + # -- (bool) Whether public datasets are enabled. + publicDataSets: true + # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` + tierAccessLevel: libre + # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. + tierAccessLimit: 1000 + # -- (bool) Whether network policies are enabled. + netPolicy: true + # -- (int) Number of dispatcher jobs. + dispatcherJobNum: 10 + # -- (bool) Whether Datadog is enabled. + ddEnabled: false + # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. + pdb: false + # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. + minAvialable: 1 + + postgres: + # -- (bool) Whether the database should be created. + dbCreate: true + # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres + master: + # -- (string) hostname of postgres server + host: + # -- (string) username of superuser in postgres. This is used to create or restore databases + username: postgres + # -- (string) password for superuser in postgres. This is used to create or restore databases + password: + # -- (string) Port for Postgres. + port: "5432" + +postgres: + # (bool) Whether the database should be restored from s3. Default to global.postgres.dbRestore + dbRestore: false + # -- (bool) Whether the database should be created. Default to global.postgres.dbCreate + dbCreate: + # -- (string) Hostname for postgres server. This is a service override, defaults to global.postgres.host + host: + # -- (string) Database name for postgres. This is a service override, defaults to - + database: + # -- (string) Username for postgres. This is a service override, defaults to - + username: + # -- (string) Port for Postgres. + port: "5432" + # -- (string) Password for Postgres. Will be autogenerated if left empty. 
+ password: + # -- (string) Will create a Database for the individual service to help with developing it. + separate: false + +postgresql: + primary: + persistence: + # -- (bool) Option to persist the dbs data. + enabled: false + +arboristUrl: + +replicaCount: 1 + +image: + repository: quay.io/pcdc/amanuensis + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "pcdc_dev_2023-09-06T16_36_49-05_00" + +imagePullSecrets: [] + +nameOverride: "" + +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +resources: + requests: + # -- (string) The amount of CPU requested + cpu: 0.1 + # -- (string) The amount of memory requested + memory: 12Mi + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of CPU the container can use + cpu: 1.0 + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +logo: + +# -- (list) Environment variables to pass to the container +env: + - name: GEN3_UWSGI_TIMEOUT + valueFrom: + configMapKeyRef: + name: manifest-global + key: uwsgi-timeout + optional: true + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: PYTHONPATH + value: /var/www/amanuensis + - name: GEN3_DEBUG + value: "False" + - name: AMANUENSIS_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-amanuensis + key: amanuensis-config-public.yaml + optional: true + - name: PGHOST + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: host + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: username + optional: false + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: password + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: database + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: dbcreated + optional: false + - name: DB + value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) + +# -- (list) Volumes to attach to the container. +volumes: + - name: logo-volume + configMap: + name: "logo-config" + - name: config-volume + secret: + secretName: "amanuensis-config" + - name: amanuensis-volume + secret: + secretName: "amanuensis-creds" + - name: amanuensis-jwt-keys + secret: + secretName: "amanuensis-jwt-keys" + #need to add potentially + - name: yaml-merge + configMap: + name: "amanuensis-yaml-merge" + optional: true + +# -- (list) Volumes to mount to the container. 
+volumeMounts: + - name: "logo-volume" + readOnly: true + mountPath: "/amanuensis/amanuensis/static/img/logo.svg" + subPath: "logo.svg" + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/amanuensis/yaml_merge.py" + subPath: yaml_merge.py + - name: "amanuensis-volume" + readOnly: true + mountPath: "/var/www/amanuensis/creds.json" + subPath: creds.json + - name: "amanuensis-jwt-keys" + readOnly: true + mountPath: "/var/www/amanuensis/jwt_private_key.pem" + subPath: "jwt_private_key.pem" + + +# Values to determine the labels that are used for the deployment, pod, etc. +# -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". +release: "production" +# -- (string) Valid options are "true" or "false". If invalid option is set- the value will default to "false". +criticalService: "true" +# -- (string) Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. +partOf: "Core-Service" +# -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl +selectorLabels: +# -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl +commonLabels: + +# Values to configure datadog if ddEnabled is set to "true". +# -- (bool) If enabled, the Datadog Agent will automatically inject Datadog-specific metadata into your application logs. +datadogLogsInjection: true +# -- (bool) If enabled, the Datadog Agent will collect profiling data for your application using the Continuous Profiler. This data can be used to identify performance bottlenecks and optimize your application. +datadogProfilingEnabled: true +# -- (int) A value between 0 and 1, that represents the percentage of requests that will be traced. For example, a value of 0.5 means that 50% of requests will be traced. 
+datadogTraceSampleRate: 1 + + +AMANUENSIS_CONFIG: + APP_NAME: 'Gen3 Data Commons' + HOSTNAME: 'localhost' + # Where amanuensis microservice is deployed + # a standardized name unique to each app for service-to-service interaction + # so the service receiving the request knows it came from another Gen3 service + # postgres db to connect to + # connection url format: + # postgresql://[user[:password]@][netloc][:port][/dbname] + + # A URL-safe base64-encoded 32-byte key for encrypting keys in db + # in python you can use the following script to generate one: + # import base64 + # import os + # key = base64.urlsafe_b64encode(os.urandom(32)) + # print(key) + ENCRYPTION_KEY: '' + + # Cross-service keys + # Private key for signing requests sent to other Gen3 services + PRIVATE_KEY_PATH: '/var/www/amanuensis/jwt_private_key.pem' + + # ////////////////////////////////////////////////////////////////////////////////////// + # DEBUG & SECURITY SETTINGS + # - Modify based on whether you're in a dev environment or in production + # ////////////////////////////////////////////////////////////////////////////////////// + # flask's debug setting + # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) + DEBUG: true + # if true, will automatically login a user with username "test" + # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) + MOCK_AUTH: true + # if true, will fake a successful login response from Google in /login/google + # NOTE: this will also modify the behavior of /link/google endpoints + # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) + # will login as the username set in cookie DEV_LOGIN_COOKIE_NAME + MOCK_GOOGLE_AUTH: true + + # if true, will ignore anything configured in STORAGE_CREDENTIALS + MOCK_STORAGE: true + # allow OIDC traffic on http for development. By default it requires https. + # + # WARNING: ONLY set to true when amanuensis will be deployed in such a way that it will + # ONLY receive traffic from internal clients and can safely use HTTP. + AUTHLIB_INSECURE_TRANSPORT: true + + # set if you want browsers to only send cookies with requests over HTTPS + SESSION_COOKIE_SECURE: true + + ENABLE_CSRF_PROTECTION: true + + OIDC_ISSUER: 'https://{{HOSTNAME}}/user' + + OAUTH2: + client_id: 'oauth2_client_id' + client_secret: 'oauth2_client_secret' + api_base_url: 'https://{{HOSTNAME}}/user/' + authorize_url: 'https://{{HOSTNAME}}/user/oauth2/authorize' + access_token_url: 'https://{{HOSTNAME}}/user/oauth2/token' + refresh_token_url: 'https://{{HOSTNAME}}/user/oauth2/token' + client_kwargs: + # redirect_uri: 'https://{{HOSTNAME}}/api/v0/oauth2/authorize' + redirect_uri: 'https://{{HOSTNAME}}/amanuensis/oauth2/authorize' + scope: 'openid data user' + # deprecated key values, should be removed after all commons use new oidc + internal_oauth_provider: 'http://fence-service/oauth2/' + oauth_provider: 'https://{{HOSTNAME}}/user/oauth2/' + # redirect_uri: 'https://{{HOSTNAME}}/api/v0/oauth2/authorize' + redirect_uri: 'https://{{HOSTNAME}}/amanuensis/oauth2/authorize' + + USER_API: 'http://fence-service/' + # option to force authutils to prioritize USER_API setting over the issuer from + # token when redirecting, used during local docker compose setup when the + # services are on different containers but the hostname is still localhost + FORCE_ISSUER: true + + # amanuensis (at the moment) attempts a migration on startup. setting this to false will disable that + # WARNING: ONLY set to false if you do NOT want to automatically migrate your database. 
+ # You should be careful about incompatible versions of your db schema with what + # amanuensis expects. In other words, things could be broken if you update to a later + # amanuensis that expects a schema your database isn't migrated to. + # NOTE: We are working to improve the migration process in the near future + ENABLE_DB_MIGRATION: true + + + # ////////////////////////////////////////////////////////////////////////////////////// + # LIBRARY CONFIGURATION (flask) + # - Already contains reasonable defaults + # ////////////////////////////////////////////////////////////////////////////////////// + + # used for flask, "path mounted under by the application / web server" + # since we deploy as microservices, fence is typically under {{base}}/user + # this is also why our BASE_URL default ends in /user + APPLICATION_ROOT: '/amanuensis' + + + + ######################################################################################## + # OPTIONAL CONFIGURATIONS # + ######################################################################################## + + # ////////////////////////////////////////////////////////////////////////////////////// + # SUPPORT INFO + # ////////////////////////////////////////////////////////////////////////////////////// + # If you want an email address to show up when an unhandled error occurs, provide one + # here. Something like: support@example.com + SUPPORT_EMAIL_FOR_ERRORS: null + + + # ////////////////////////////////////////////////////////////////////////////////////// + # AWS BUCKETS AND CREDENTIALS + # - Support `/data` endpoints + # ////////////////////////////////////////////////////////////////////////////////////// + AWS_CREDENTIALS: + 'DATA_DELIVERY_S3_BUCKET': + aws_access_key_id: 'DATA_DELIVERY_S3_BUCKET_ACCESS_KEY' + aws_secret_access_key: 'DATA_DELIVERY_S3_BUCKET_PRIVATE_KEY' + # NOTE: Remove the {} and supply creds if needed. Example in comments below + # 'CRED1': + # aws_access_key_id: '' + # aws_secret_access_key: '' + # 'CRED2': + # aws_access_key_id: '' + # aws_secret_access_key: '' + + # NOTE: the region is optonal for s3_buckets, however it should be specified to avoid a + # call to GetBucketLocation which you make lack the AWS ACLs for. + # public buckets do not need the region field. + S3_BUCKETS: {} + # NOTE: Remove the {} and supply buckets if needed. Example in comments below + # bucket1: + # cred: 'CRED1' + # region: 'us-east-1' + # # optionally you can manually specify an s3-compliant endpoint for this bucket + # endpoint_url: 'https://cleversafe.example.com/' + # bucket2: + # cred: 'CRED2' + # region: 'us-east-1' + # bucket3: + # cred: '*' # public bucket + # bucket4: + # cred: 'CRED1' + # region: 'us-east-1' + # role-arn: 'arn:aws:iam::role1' + + # `DATA_DOWNLOAD_BUCKET` specifies an S3 bucket to which data files are uploaded by the system/admin user, + # User Data request files are stored here. 
+ DATA_DOWNLOAD_BUCKET: 'bucket1' + + # ////////////////////////////////////////////////////////////////////////////////////// + # PROXY + # - Optional: If the api is behind firewall that needs to set http proxy + # ////////////////////////////////////////////////////////////////////////////////////// + # NOTE: leave as-is to not use proxy + # this is only used by the Google Oauth2Client at the moment if provided + HTTP_PROXY: + host: null + port: 3128 + + # ////////////////////////////////////////////////////////////////////////////////////// + # MICROSERVICE PATHS + # - Support `/data` endpoints & authz functionality + # ////////////////////////////////////////////////////////////////////////////////////// + # url where indexd microservice is running (for signed urls primarily) + # NOTE: Leaving as null will force fence to default to {{BASE_URL}}/index + # example value: 'https://example.com/index' + INDEXD: null + + # this is the username which fence uses to make authenticated requests to indexd + INDEXD_USERNAME: 'amanuensis' + # this is the password which fence uses to make authenticated requests to indexd + INDEXD_PASSWORD: '' + + # url where authz microservice is running + ARBORIST: null + FENCE: 'http://fence-service' + + # ////////////////////////////////////////////////////////////////////////////////////// + # EMAIL + # - Support for sending hubspot API work updates to project management team + # ////////////////////////////////////////////////////////////////////////////////////// + # Simple Email Service (for sending emails from fence) + # + # NOTE: Example in comments below + AWS_SES: + SENDER: "" + RECIPIENT: "" + AWS_REGION: "us-east-1" + AWS_ACCESS_KEY: "" + AWS_SECRET_KEY: "" + + HUBSPOT: + API_KEY: "DEV_KEY" + + DB_MIGRATION_POSTGRES_LOCK_KEY: 100 + + + # Draft: The user started the form but saves it to complete another day + # Submitted: the user sends the completed form to a PM (email or through the system) + # Review: In the hand of the EC + # Revision: back to the requestor, needs to respond to the EC questions or concerns. loop to review + # Approved with Feedback: It is approved but it needs some changes with for example the filter-set before being approved + # Request Criteria Finalized: Update the filterset or apply the feedback the EC gave + # Approved: the request is approved + # Rejected: the request is rejected + # Withdrawal: The request has been withdrawn + # Agreements Negotiation: Creating the agreements after the request is approved + # Agreements Executed: The agreements is in place + # Data Available: The data is ready to be submitted + # Data Downloaded: When a user actually download the data + # Published: Remove access to the data + CONSORTIUM_STATUS: + DEFAULT: + # In order of precedence. + CODES: + - "DRAFT" + - "SUBMITTED" + - "IN_REVIEW" + - "REVISION" + - "APPROVED_WITH_FEEDBACK" + - "REQUEST_CRITERIA_FINALIZED" + - "APPROVED" + - "REJECTED" + - "WITHDRAWAL" + - "AGREEMENTS_NEGOTIATION" + - "AGREEMENTS_EXECUTED" + - "DATA_AVAILABLE" + - "DATA_DOWNLOADED" + - "PUBLISHED" + FINAL: + - "REJECTED" + - "WITHDRAWAL" + - "DATA_DOWNLOADED" + NOTIFY: + - "DATA_DOWNLOADED" + INSTRUCT: + # In order of precedence. 
+ CODES: + - "DRAFT" + - "SUBMITTED" + - "IN_REVIEW" + - "REVISION" + - "APPROVED_WITH_FEEDBACK" + - "REQUEST_CRITERIA_FINALIZED" + - "APPROVED" + - "REJECTED" + - "WITHDRAWAL" + - "AGREEMENTS_NEGOTIATION" + - "AGREEMENTS_EXECUTED" + - "DATA_AVAILABLE" + - "DATA_DOWNLOADED" + - "PUBLISHED" + FINAL: + - "REJECTED" + - "WITHDRAWAL" + - "DATA_DOWNLOADED" + NOTIFY: + - "DATA_DOWNLOADED" + INRG: + # In order of precedence. + CODES: + - "DRAFT" + - "SUBMITTED" + - "IN_REVIEW" + - "REVISION" + - "APPROVED_WITH_FEEDBACK" + - "REQUEST_CRITERIA_FINALIZED" + - "APPROVED" + - "REJECTED" + - "WITHDRAWAL" + - "AGREEMENTS_NEGOTIATION" + - "AGREEMENTS_EXECUTED" + - "DATA_AVAILABLE" + - "DATA_DOWNLOADED" + - "PUBLISHED" + FINAL: + - "REJECTED" + - "WITHDRAWAL" + - "DATA_DOWNLOADED" + NOTIFY: + - "DATA_DOWNLOADED" + # Add consortia \ No newline at end of file diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index c85cbc638..58c8ca632 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -91,6 +91,10 @@ dependencies: version: "0.1.0" repository: "file://../pcdcanalysistools" condition: pcdcanalysistools.enabled +- name: amanuensis + version: "0.1.0" + repository: "file://../amanuensis" + condition: amanuensis.enabled - name: elasticsearch version: "0.1.5" diff --git a/helm/gen3/values.yaml b/helm/gen3/values.yaml index 74ffc96c6..83daa49c5 100644 --- a/helm/gen3/values.yaml +++ b/helm/gen3/values.yaml @@ -367,6 +367,11 @@ ssjdispatcher: pcdcanalysistools: enabled: true + +amanuensis: + enabled: true + + wts: # -- (bool) Whether to deploy the wts subchart. enabled: true diff --git a/helm/pcdcanalysistools/templates/service.yaml b/helm/pcdcanalysistools/templates/service.yaml index 5ba7ae336..721f830df 100644 --- a/helm/pcdcanalysistools/templates/service.yaml +++ b/helm/pcdcanalysistools/templates/service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ include "pcdcanalysistools.fullname" . }} + name: "pcdcanalysis-service" labels: {{- include "pcdcanalysistools.labels" . 
| nindent 4 }} spec: diff --git a/helm/revproxy/gen3.nginx.conf/amanuensis-service.conf b/helm/revproxy/gen3.nginx.conf/amanuensis-service.conf new file mode 100644 index 000000000..e9fffae2c --- /dev/null +++ b/helm/revproxy/gen3.nginx.conf/amanuensis-service.conf @@ -0,0 +1,24 @@ +location /amanuensis/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + proxy_next_upstream off; + proxy_set_header Host $host; + proxy_set_header Authorization "$access_token"; + proxy_set_header X-Forwarded-For "$realip"; + proxy_set_header X-UserId "$userid"; + proxy_set_header X-SessionId "$session_id"; + proxy_set_header X-VisitorId "$visitor_id"; + + proxy_connect_timeout 300; + proxy_send_timeout 300; + proxy_read_timeout 300; + send_timeout 300; + + set $proxy_service "amanuensis"; + # set $upstream http://amanuensis-service.$namespace.svc.cluster.local; + set $upstream http://amanuensis-service$des_domain; + rewrite ^/amanuensis/(.*) /$1 break; + proxy_pass $upstream; +} \ No newline at end of file diff --git a/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf b/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf new file mode 100644 index 000000000..b170da423 --- /dev/null +++ b/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf @@ -0,0 +1,26 @@ +location /analysis/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + proxy_next_upstream off; + # Forward the host and set Subdir header so api + # knows the original request path for hmac signing + proxy_set_header Host $host; + proxy_set_header Subdir /api; + proxy_set_header Authorization "$access_token"; + proxy_set_header X-Forwarded-For "$realip"; + proxy_set_header X-UserId "$userid"; + proxy_set_header X-SessionId "$session_id"; + proxy_set_header X-VisitorId "$visitor_id"; + + proxy_connect_timeout 300; + proxy_send_timeout 300; + proxy_read_timeout 300; + send_timeout 300; + + set $proxy_service "pcdcanalysistools"; + set $upstream http://pcdcanalysis-service.default.svc.cluster.local; + rewrite ^/analysis/(.*) /$1 break; + proxy_pass $upstream; +} \ No newline at end of file diff --git a/values.yaml b/values.yaml index a32346a69..e18a44cd7 100644 --- a/values.yaml +++ b/values.yaml @@ -3,6 +3,12 @@ global: hostname: localhost portalApp: pcdc dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + # postgres: + # master: + # host: pcdc-postgresql + # username: postgres + # password: pcdc + # port: "5432" arborist: image: @@ -90,6 +96,10 @@ wts: repository: quay.io/cdis/workspace-token-service tag: 2023.08 +# amanuensis: +# image: +# repository: quay.io/pcdc/amanuensis +# tag: "test" guppy: @@ -127,21 +137,17 @@ guppy: - -# pcdcanalysistools: -# image: -# repository: quay.io/pcdc/pcdcanalysistools -# tag: 1.8.4 +pcdcanalysistools: + image: + repository: quay.io/pcdc/pcdcanalysistools + tag: 1.8.4 # fluentd: # image: # repository: fluent/fluentd-kubernetes-daemonset # tag: v1.15.3-debian-cloudwatch-1.0 -# amanuensis: -# image: -# repository: quay.io/pcdc/amanuensis -# tag: pcdc_dev_2023-09-06T16_36_49-05_00 + # revproxy: From ea577a15a5b7505aacb9853f2a6ab7ba6e2852e5 Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Fri, 29 Sep 2023 11:02:10 -0700 Subject: [PATCH 006/126] added modifications --- .../amanuensis_google_app_creds_secret.json | 0 ...manuensis_google_storage_creds_secret.json | 0 .../templates/amanuensis-creds.yaml | 6 +- .../templates/amanuensis-secret.yaml | 16 +- helm/amanuensis/values.yaml | 1 - 
.../pcdcanalysistools-secret/settings.py | 41 +- .../templates/deployment.yaml | 2 + helm/portal/defaults/gitops.json | 566 +++++++++++++++++- helm/portal/templates/deployment.yaml | 6 +- helm/portal/values.yaml | 259 +------- helm/sheepdog/templates/deployment.yaml | 2 +- values.yaml | 20 +- 12 files changed, 581 insertions(+), 338 deletions(-) delete mode 100644 helm/amanuensis/amanuensis-google-creds/amanuensis_google_app_creds_secret.json delete mode 100644 helm/amanuensis/amanuensis-google-creds/amanuensis_google_storage_creds_secret.json diff --git a/helm/amanuensis/amanuensis-google-creds/amanuensis_google_app_creds_secret.json b/helm/amanuensis/amanuensis-google-creds/amanuensis_google_app_creds_secret.json deleted file mode 100644 index e69de29bb..000000000 diff --git a/helm/amanuensis/amanuensis-google-creds/amanuensis_google_storage_creds_secret.json b/helm/amanuensis/amanuensis-google-creds/amanuensis_google_storage_creds_secret.json deleted file mode 100644 index e69de29bb..000000000 diff --git a/helm/amanuensis/templates/amanuensis-creds.yaml b/helm/amanuensis/templates/amanuensis-creds.yaml index c904835fe..638e1fe0b 100644 --- a/helm/amanuensis/templates/amanuensis-creds.yaml +++ b/helm/amanuensis/templates/amanuensis-creds.yaml @@ -12,7 +12,7 @@ stringData: "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" $.Chart.Name "context" $)}}", "hostname": "{{ .Values.global.hostname }}", "indexd_password": "", - "google_client_secret": "YOUR.GOOGLE.SECRET", - "google_client_id": "YOUR.GOOGLE.CLIENT", - "hmac_key": "" + "data_delivery_bucket": {{ .Values.AMANUENSIS_CONFIG.DATA_DOWNLOAD_BUCKET }}, + "data_delivery_bucket_aws_key_id": {{ .Values.AMANUENSIS_CONFIG.AWS_CREDENTIALS.aws_access_key_id }}, + "data_delivery_bucket_aws_access_key": {{ .Values.AMANUENSIS_CONFIG.AWS_CREDENTIALS.aws_secret_access_key }} } diff --git a/helm/amanuensis/templates/amanuensis-secret.yaml b/helm/amanuensis/templates/amanuensis-secret.yaml index 4adb27736..c7ae2e52b 100644 --- a/helm/amanuensis/templates/amanuensis-secret.yaml +++ b/helm/amanuensis/templates/amanuensis-secret.yaml @@ -6,18 +6,4 @@ type: Opaque data: {{ (.Files.Glob "amanuensis-secret/*").AsSecrets | indent 2 }} --- -apiVersion: v1 -kind: Secret -metadata: - name: amanuensis-google-app-creds-secret -type: Opaque -data: -{{ (.Files.Glob "amanuensis-google-creds/*").AsSecrets | indent 2 }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: amanuensis-google-storage-creds-secret -type: Opaque -data: -{{ (.Files.Glob "amanuensis-google-creds/*").AsSecrets | indent 2 }} + diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index dd99265c5..2855ac6fc 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -96,7 +96,6 @@ replicaCount: 1 image: repository: quay.io/pcdc/amanuensis pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. 
tag: "pcdc_dev_2023-09-06T16_36_49-05_00" imagePullSecrets: [] diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py index 743d8e02a..6f6ea4f86 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -1,11 +1,11 @@ from PcdcAnalysisTools.api import app, app_init from os import environ -#import confighelper +#import config_helper from pcdcutils.environment import is_env_enabled APP_NAME='PcdcAnalysisTools' # def load_json(file_name): -# return confighelper.load_json(file_name, APP_NAME) +# return config_helper.load_json(file_name, APP_NAME) # conf_data = load_json("creds.json") @@ -23,17 +23,17 @@ config["ARBORIST"] = "http://arborist-service/" # Signpost: deprecated, replaced by index client. -# config["SIGNPOST"] = { -# "host": environ.get('INDEX_CLIENT_HOST') or "http://indexd-service", -# "version": "v0", -# "auth": ("gdcapi", environ.get( "PGHOST")), -# } +config["SIGNPOST"] = { + "host": environ.get("SIGNPOST_HOST") or "http://indexd-service", + "version": "v0", + "auth": ("gdcapi", environ.get( "INDEXD_PASS") ), +} config["INDEX_CLIENT"] = { - "host": environ.get('INDEX_CLIENT_HOST') or "http://indexd-service", + "host": environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", "version": "v0", - "auth": ("gdcapi", environ.get( "PGHOST")), + "auth": ("gdcapi", environ.get( "INDEXD_PASS") ), } -#config["FAKE_AUTH"] = False +config["FAKE_AUTH"] = False config["PSQLGRAPH"] = { 'host': environ.get( "PGHOST"), 'user': environ.get( "PGUSER"), @@ -41,6 +41,8 @@ 'database': environ.get( "PGDB"), } +# config["HMAC_ENCRYPTION_KEY"] = conf_data.get("hmac_key", "{{hmac_key}}") +# config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") config["HMAC_ENCRYPTION_KEY"] = environ.get( "HMAC_ENCRYPTION_KEY") config["FLASK_SECRET_KEY"] = environ.get( "FLASK_SECRET_KEY") fence_username = environ.get( "FENCE_DB_USER") @@ -49,11 +51,11 @@ fence_database = environ.get( "FENCE_DB_DBNAME") config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (fence_username, fence_password, fence_host, fence_database) -hostname = environ.get("CONF_HOSTNAME") +hostname = environ.get("CONF_HOSTNAME", "localhost") config['OIDC_ISSUER'] = 'https://%s/user' % hostname config["OAUTH2"] = { - "client_id": 'conf_data.get("oauth2_client_id", "{{oauth2_client_id}}")', + "client_id": 'conf_data.get("oauth2_client_id", "{{oauth2_client_id}}")', "client_secret": 'conf_data.get("oauth2_client_secret", "{{oauth2_client_secret}}")', "api_base_url": "https://%s/user/" % hostname, "authorize_url": "https://%s/user/oauth2/authorize" % hostname, @@ -72,19 +74,20 @@ # trailing slash intentionally omitted config['GUPPY_API'] = 'http://guppy-service' -# config['USER_API'] = 'http://fence-service/' config["USER_API"] = config["OIDC_ISSUER"] # for use by authutils -# use the USER_API URL instead of the public issuer URL to accquire JWT keys -config["FORCE_ISSUER"] = True +# config['USER_API'] = 'http://fence-service/' +# option to force authutils to prioritize USER_API setting over the issuer from +# token when redirecting, used during local docker compose setup when the +# services are on different containers but the hostname is still localhost +config['FORCE_ISSUER'] = True if environ.get('DICTIONARY_URL'): config['DICTIONARY_URL'] = environ.get('DICTIONARY_URL') else: config['PATH_TO_SCHEMA_DIR'] = environ.get('PATH_TO_SCHEMA_DIR') 
- config['SURVIVAL'] = { - 'consortium': ["INSTRuCT", "INRG"], + 'consortium': ["INSTRuCT", "INRG", "MaGIC", "NODAL"], 'excluded_variables': [ { 'label': 'Data Contributor', @@ -99,6 +102,7 @@ 'field': 'studies.treatment_arm', } ], + 'result': { 'risktable': True, 'survival': True @@ -122,7 +126,6 @@ } } - app_init(app) application = app -application.debug = (is_env_enabled('GEN3_DEBUG')) +application.debug = (is_env_enabled('GEN3_DEBUG')) \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index 1a7be4941..eef69e6a9 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -128,6 +128,8 @@ spec: name: manifest-global key: fence_url optional: true + - name: FLASK_SECRET_KEY + value: "TODO: FIX THIS!!!" - name: ARBORIST_URL value: http://arborist-service - name: AUTH_NAMESPACE diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index af1a3051c..9c6358f3b 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -55,7 +55,10 @@ "link": "/query", "label": "Query data" } - ] + ], + "barChart": { + "showPercentage": true + } }, "navigation": { "items": [ @@ -76,12 +79,6 @@ "link": "/query", "color": "#a2a2a2", "name": "Query" - }, - { - "icon": "profile", - "link": "/identity", - "color": "#a2a2a2", - "name": "Profile" } ] }, @@ -98,11 +95,6 @@ "leftOrientation": true, "link": "https://commons.cri.uchicago.edu/sponsors/", "name": "Our Sponsors" - }, - { - "icon": "upload", - "link": "/submission", - "name": "Data Submission" } ] }, @@ -111,7 +103,7 @@ "subTitle": "Connect. Share. Cure.", "text": "The Pediatric Cancer Data Commons (PCDC) harnesses pediatric cancer clinical data from around the globe into a single combined platform, connecting the data to other sources and making it available to clinicians and researchers everywhere. Headquartered at the University of Chicago, the PCDC team works with international leaders in pediatric cancers to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources. 
The PCDC currently houses the world's largest sets of clinical data for pediatric neuroblastoma and soft tissue sarcoma and is in the process of onboarding additional pediatric cancer disease groups.", "contact": "If you have any questions about access or the registration process, please contact ", - "email": "pcdc_root@lists.uchicago.edu" + "email": "pcdc_help@lists.uchicago.edu" }, "footerLogos": [ { @@ -132,6 +124,515 @@ { "id": 1, "label": "data", + "charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + }, + "consortium": { + "chartType": "bar", + "title": "Consortium" + } + }, + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "data_contributor_id", + "studies.study_id", + "studies.treatment_arm", + "sex", + "race", + "ethnicity", + "year_at_disease_phase", + "survival_characteristics.lkss_obfuscated", + "censor_status", + "age_at_censor_status", + "medical_histories.medical_history", + "medical_histories.medical_history_status", + "external_references.external_resource_name" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "histologies.histology_grade", + "histologies.histology_inpc", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.tumor_state", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.depth", + "tumor_assessments.tumor_size", + "tumor_assessments.invasiveness", + "tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "tumor_assessments.necrosis", + "tumor_assessments.necrosis_pct", + "tumor_assessments.tumor_laterality", + "stagings.irs_group", + "stagings.tnm_finding", + "stagings.stage_system", + "stagings.stage", + "stagings.AB", + "stagings.E", + "stagings.S", + "disease_characteristics.mki", + "disease_characteristics.bulk_disease", + "disease_characteristics.BULK_MED_MASS", + "disease_characteristics.bulky_nodal_aggregate" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2", + "molecular_analysis.dna_index", + "molecular_analysis.age_at_molecular_analysis", + "molecular_analysis.mitoses", + "molecular_analysis.cytodifferentiation" + ] + }, + { + "title": "Surgery", + "fields": [ + "biopsy_surgical_procedures.tumor_classification", + "biopsy_surgical_procedures.procedure_type", + "biopsy_surgical_procedures.margins" + ] + }, + { + "title": "Radiation", + "fields": [ + "radiation_therapies.tumor_classification", + "radiation_therapies.energy_type", + "radiation_therapies.rt_dose" + ] + }, + { + "title": "Response", + "fields": [ + "subject_responses.tx_prior_response", + "subject_responses.response", + "subject_responses.interim_response", + "subject_responses.response_method" + ] + }, + { + "title": "SMN", + "fields": [ + "secondary_malignant_neoplasm.age_at_smn", + "secondary_malignant_neoplasm.smn_site", + "secondary_malignant_neoplasm.smn_type", + 
"secondary_malignant_neoplasm.smn_morph_icdo" + ] + }, + { + "title": "Imaging", + "fields": [ + "imagings.imaging_method", + "imagings.imaging_result" + ] + }, + { + "title": "Labs", + "fields": [ + "labs.lab_test", + "labs.lab_result", + "labs.lab_result_numeric", + "labs.lab_result_unit" + ] + }, + { + "title": "SCT", + "fields": [ + "stem_cell_transplants.sct_type" + ] + } + ] + }, + "projectId": "search", + "graphqlField": "subject", + "index": "", + "buttons": [ + { + "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data accessible to you" + } + ], + "table": { + "enabled": true, + "fields": [ + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + "patientIds": { + "filter": false, + "export": true + }, + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": true, + "survival": true + } + }, + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "data_contributor_id", + "name": "Data Contributor", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.study_id", + "name": "Study Id", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.treatment_arm", + "name": "Treatment Arm", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." 
+ }, + { + "field": "year_at_disease_phase", + "name": "Year at Initial Diagnosis" + }, + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "survival_characteristics.lkss_obfuscated", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "medical_histories.medical_history", + "name": "Medical History" + }, + { + "field": "medical_histories.medical_history_status", + "name": "Medical History Status" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + { + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": "INPC Classification" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_state", + "name": "Tumor State" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diameter Dimension 1" + }, + { + "field": "tumor_assessments.depth", + "name": "Tumor Depth" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "tumor_assessments.necrosis", + "name": "Necrosis" + }, + { + "field": "tumor_assessments.necrosis_pct", + "name": "Necrosis PCT" + }, + { + "field": "tumor_assessments.tumor_laterality", + "name": "Tumor Laterality" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + }, + { + "field": "stagings.stage_system", + "name": "Stage System" + }, + { + "field": "stagings.stage", + "name": "Stage" + }, + { + "field": "stagings.AB", + "name": "Ann Arbor AB" + }, + { + "field": "stagings.E", + "name": "Ann Arbor E" + }, + { + "field": "stagings.S", + "name": "Ann Arbor S" + }, + { + "field": "disease_characteristics.mki", + "name": "MKI" + }, + { + "field": "disease_characteristics.bulk_disease", + "name": "Bulky Disease" + }, + { + "field": "disease_characteristics.BULK_MED_MASS", + "name": "Bulky Mediastinal Mass" + }, + { + "field": "disease_characteristics.bulky_nodal_aggregate", + "name": "Bulky Nodal Aggregate" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": "molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "molecular_analysis.dna_index", + "name": "DNA Index" + }, + { + "field": "molecular_analysis.age_at_molecular_analysis", + "name": "Age at Molecular Analysis" + }, + { + "field": "molecular_analysis.mitoses", + "name": "Mitoses" + }, + { + "field": 
"molecular_analysis.cytodifferentiation", + "name": "Cytodifferentiation" + }, + { + "field": "biopsy_surgical_procedures.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "biopsy_surgical_procedures.procedure_type", + "name": "Procedure Type" + }, + { + "field": "biopsy_surgical_procedures.procedure_site", + "name": "Procedure Site" + }, + { + "field": "biopsy_surgical_procedures.margins", + "name": "Margins" + }, + { + "field": "radiation_therapies.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "radiation_therapies.age_at_rt_start", + "name": "Age at Radiation Therapy" + }, + { + "field": "radiation_therapies.rt_site", + "name": "Radiation Site" + }, + { + "field": "radiation_therapies.energy_type", + "name": "Energy Type" + }, + { + "field": "radiation_therapies.rt_dose", + "name": "Radiation Dose" + }, + { + "field": "radiation_therapies.rt_unit", + "name": "Radiation Unit" + }, + { + "field": "subject_responses.age_at_response", + "name": "Age at Response" + }, + { + "field": "subject_responses.tx_prior_response", + "name": "Treatment Prior Response" + }, + { + "field": "subject_responses.response", + "name": "Response" + }, + { + "field": "subject_responses.interim_response", + "name": "Interim Response" + }, + { + "field": "subject_responses.response_method", + "name": "Response Method" + }, + { + "field": "subject_responses.necrosis", + "name": "Necrosis" + }, + { + "field": "secondary_malignant_neoplasm.age_at_smn", + "name": "Age at SMN" + }, + { + "field": "secondary_malignant_neoplasm.smn_site", + "name": "SMN Site" + }, + { + "field": "secondary_malignant_neoplasm.smn_type", + "name": "SMN Type" + }, + { + "field": "secondary_malignant_neoplasm.smn_morph_icdo", + "name": "ICD-O Morphology" + }, + { + "field": "imagings.imaging_method", + "name": "Imaging Method" + }, + { + "field": "imagings.imaging_result", + "name": "Imaging Result" + }, + { + "field": "labs.lab_result_numeric", + "name": "Numeric Lab Result" + }, + { + "field": "labs.lab_result_unit", + "name": "Lab Result Unit" + }, + { + "field": "labs.lab_result", + "name": "Lab Result" + }, + { + "field": "labs.lab_test", + "name": "Lab Test" + }, + { + "field": "stem_cell_transplants.sct_type", + "name": "SCT Type" + } + ] + }, + "dataRequests": { + "enabled": false + }, + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + }, + { + "id": 2, + "label": "data - survival", "charts": { "sex": { "chartType": "bar", @@ -146,11 +647,17 @@ "title": "Ethnicity" } }, + "adminAppliedPreFilters": { + "consortium": { + "selectedValues": ["INSTRuCT"] + } + }, "filters": { "anchor": { "field": "disease_phase", "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular"] + "tabs": ["Disease", "Molecular"], + "tooltip": "You can describe this filter here" }, "tabs": [ { @@ -193,9 +700,19 @@ } ] }, + "projectId": "search", + "graphqlField": "subject", + "index": "", "buttons": [ { "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, "type": "data", "title": "Download Data", "leftIcon": "user", @@ -220,13 +737,13 @@ }, "patientIds": { "filter": false, - "export": false + "export": true }, "survivalAnalysis": { "result": { "pval": false, - "risktable": false, - "survival": false + "risktable": true, + "survival": true } }, "guppyConfig": { @@ -235,7 +752,8 @@ "fieldMapping": [ { "field": "survival_characteristics.lkss", - "name": "Last 
Known Survival Status (LKSS)" + "name": "Last Known Survival Status (LKSS)", + "tooltip": "test tooltip" }, { "field": "survival_characteristics.age_at_lkss", @@ -263,7 +781,7 @@ }, { "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diam Dim 1" + "name": "Longest Diameter Dimension 1" }, { "field": "tumor_assessments.invasiveness", @@ -321,10 +839,6 @@ "field": "project_id", "name": "Data Release Version" }, - { - "field": "data_contributor_id", - "name": "Data Contributor" - }, { "field": "stagings.irs_group", "name": "IRS Group" @@ -336,9 +850,9 @@ ] }, "dataRequests": { - "enabled": true + "enabled": false }, - "getAccessButtonLink": "https://pcdc-gen3-docs.s3.amazonaws.com/%5BDRAFT%5D+PCDC-request_form.docx" + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" } ] -} +} \ No newline at end of file diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index 8b775001e..dbc29f873 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -74,13 +74,15 @@ spec: {{- include "common.datadogEnvVar" . | nindent 12 }} {{- end }} - name: HOSTNAME - value: portal-dev.pedscommons.org + value: revproxy-service # disable npm 7's brand new update notifier to prevent Portal from stuck at starting up # see https://github.com/npm/cli/issues/3163 - name: NPM_CONFIG_UPDATE_NOTIFIER value: "false" - name: NODE_ENV value: "dev" + - name: DICTIONARY_URL + value: {{ .Values.global.dictionaryUrl }} - name: APP value: {{ .Values.global.portalApp | quote }} - name: GEN3_BUNDLE @@ -172,7 +174,7 @@ spec: # - name: BASENAME volumeMounts: - name: "config-volume" - mountPath: "/data-portal/data/config/pcdc.json" + mountPath: "/data-portal/data/config/gitops.json" subPath: "gitops.json" - name: "config-volume" mountPath: "/data-portal/custom/logo/gitops-logo.png" diff --git a/helm/portal/values.yaml b/helm/portal/values.yaml index fa1e84677..d8e81d209 100644 --- a/helm/portal/values.yaml +++ b/helm/portal/values.yaml @@ -201,264 +201,7 @@ datadogTraceSampleRate: 1 # -- (map) GitOps configuration for portal gitops: # -- (string) multiline string - gitops.json - json: | - { - "graphql": { - "boardCounts": [ - { - "graphql": "_case_count", - "name": "Case", - "plural": "Cases" - }, - { - "graphql": "_experiment_count", - "name": "Experiment", - "plural": "Experiments" - }, - { - "graphql": "_aliquot_count", - "name": "Aliquot", - "plural": "Aliquots" - } - ], - "chartCounts": [ - { - "graphql": "_case_count", - "name": "Case" - }, - { - "graphql": "_experiment_count", - "name": "Experiment" - }, - { - "graphql": "_aliquot_count", - "name": "Aliquot" - } - ], - "projectDetails": "boardCounts" - }, - "components": { - "appName": "Generic Data Commons Portal", - "index": { - "introduction": { - "heading": "Data Commons", - "text": "The Generic Data Commons supports the management, analysis and sharing of data for the research community.", - "link": "/submission" - }, - "buttons": [ - { - "name": "Define Data Field", - "icon": "data-field-define", - "body": "The Generic Data Commons define the data in a general way. 
Please study the dictionary before you start browsing.", - "link": "/DD", - "label": "Learn more" - }, - { - "name": "Explore Data", - "icon": "data-explore", - "body": "The Exploration Page gives you insights and a clear overview under selected factors.", - "link": "/explorer", - "label": "Explore data" - }, - { - "name": "Access Data", - "icon": "data-access", - "body": "Use our selected tool to filter out the data you need.", - "link": "/query", - "label": "Query data" - }, - { - "name": "Submit Data", - "icon": "data-submit", - "body": "Submit Data based on the dictionary.", - "link": "/submission", - "label": "Submit data" - } - ] - }, - "navigation": { - "title": "Generic Data Commons", - "items": [ - { - "icon": "dictionary", - "link": "/DD", - "color": "#a2a2a2", - "name": "Dictionary" - }, - { - "icon": "exploration", - "link": "/explorer", - "color": "#a2a2a2", - "name": "Exploration" - }, - { - "icon": "query", - "link": "/query", - "color": "#a2a2a2", - "name": "Query" - }, - { - "icon": "workspace", - "link": "/workspace", - "color": "#a2a2a2", - "name": "Workspace" - }, - { - "icon": "profile", - "link": "/identity", - "color": "#a2a2a2", - "name": "Profile" - } - ] - }, - "topBar": { - "items": [ - { - "icon": "upload", - "link": "/submission", - "name": "Submit Data" - }, - { - "link": "https://gen3.org/resources/user", - "name": "Documentation" - } - ] - }, - "login": { - "title": "Generic Data Commons", - "subTitle": "Explore, Analyze, and Share Data", - "text": "This website supports the management, analysis and sharing of human disease data for the research community and aims to advance basic understanding of the genetic basis of complex traits and accelerate discovery and development of therapies, diagnostic tests, and other technologies for diseases like cancer.", - "contact": "If you have any questions about access or the registration process, please contact ", - "email": "support@datacommons.io" - }, - "certs": {}, - "footerLogos": [ - { - "src": "/src/img/gen3.png", - "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons" - }, - { - "src": "/src/img/createdby.png", - "href": "https://ctds.uchicago.edu/", - "alt": "Center for Translational Data Science at the University of Chicago" - } - ] - }, - "requiredCerts": [], - "featureFlags": { - "explorer": true, - "noIndex": true, - "analysis": false, - "discovery": false, - "discoveryUseAggMDS": false, - "studyRegistration": false - }, - "dataExplorerConfig": { - "charts": { - "project_id": { - "chartType": "count", - "title": "Projects" - }, - "case_id": { - "chartType": "count", - "title": "Cases" - }, - "gender": { - "chartType": "pie", - "title": "Gender" - }, - "race": { - "chartType": "bar", - "title": "Race" - } - }, - "filters": { - "tabs": [ - { - "title": "Case", - "fields":[ - "project_id", - "gender", - "race", - "ethnicity" - ] - } - ] - }, - "table": { - "enabled": false - }, - "dropdowns": {}, - "buttons": [], - "guppyConfig": { - "dataType": "case", - "nodeCountTitle": "Cases", - "fieldMapping": [ - { "field": "disease_type", "name": "Disease type" }, - { "field": "primary_site", "name": "Site where samples were collected"} - ], - "manifestMapping": { - "resourceIndexType": "file", - "resourceIdField": "object_id", - "referenceIdFieldInResourceIndex": "case_id", - "referenceIdFieldInDataIndex": "node_id" - }, - "accessibleFieldCheckList": ["case_id"], - "accessibleValidationField": "case_id" - } - }, - "fileExplorerConfig": { - "charts": { - "data_type": { - "chartType": "stackedBar", - 
"title": "File Type" - }, - "data_format": { - "chartType": "stackedBar", - "title": "File Format" - } - }, - "filters": { - "tabs": [ - { - "title": "File", - "fields": [ - "project_id", - "data_type", - "data_format" - ] - } - ] - }, - "table": { - "enabled": true, - "fields": [ - "project_id", - "file_name", - "file_size", - "object_id" - ] - }, - "dropdowns": {}, - "guppyConfig": { - "dataType": "file", - "fieldMapping": [ - { "field": "object_id", "name": "GUID" } - ], - "nodeCountTitle": "Files", - "manifestMapping": { - "resourceIndexType": "case", - "resourceIdField": "case_id", - "referenceIdFieldInResourceIndex": "object_id", - "referenceIdFieldInDataIndex": "object_id" - }, - "accessibleFieldCheckList": ["case_id"], - "accessibleValidationField": "case_id", - "downloadAccessor": "object_id" - } - } - } + json: # -- (string) - favicon in base64 favicon: "AAABAAEAICAAAAEAIACoEAAAFgAAACgAAAAgAAAAQAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQv3IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1MiCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwKg0Nd6yqf+8pi7D3rKp/96yqf/esqn/3rKp/76qNMPEpU2QxbFJNwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7WfF3cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMWySQAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/TrIS0AAAAAL+nLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACxmAIAxrhKBregGtLesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/2MyPCLGaCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAs5kJANqvn0vesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/18l+GwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKuSAADq5L8H3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/z79qBca0SwAAAAAAAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+4oR3YAAAAAAAAAAAAAAAAAAAAAAAAAAC4oBlZ3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/AqC/N3rKp/96yqf+/rD3M3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+4oyBkAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+9qDAqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzb1oH96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/8qoYv8AAAAAAAAAALefHQC4oB5X3rKp/96yqf/esqn/AAAAAAAAAADm3bsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOHbrAAAAAAA6ePTEd6yqf/esqn/3rKp/8CsNngAAAAAAAAAAN6yqf/esqn/3rKp/////xIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADq4bwA08V3EN6yqf/esqn/3rKp/wAAAAAAAAAA3rKp/96yqf+6nyfZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/AAAAALyjJDbesqn/3rKp/7ihIc0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADFpE7l3rKp/96yqf/esqn/wq0+Wd6yqf/esqn/3rKp/wAAAADPwW4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC7pCAAAAAAAN6yqf/esqn/3rKp/8CsOVK6oyF63rKp/96yqf/esqn/uqQqxAAAAAC7oy
QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtZ8WAAAAAADesqn/3rKp/96yqf/esqn/3rKp/7ukIHresqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/wK1BXN6yqf/esqn/3rKp/96yqf/esqn/uKAYUgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL+oO1Hesqn/3rKp/96yqf/esqn/3rKp/76pLXq3nx023rKp/96yqf/esqn/3rKp/96yqf/esqn/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAt58l896yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/xrRRVQAAAADYzYkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAM67agAAAAAAybZYUt6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/9+/UXAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAACznRMAtJ4ZV96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/ArDZ4AAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/yqdi/wAAAAAAAAAAAAAAAAAAAADHplZ93rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/6Ny8U+bauVDesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+5oyBkAAAAAAAAAAAAAAAAAAAAAAAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/t6Ec1wAAAAAAAAAAAAAAAAAAAAAAAAAAs5sWAOHUlQfesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/OxHUFxbRJAAAAAAAAAAAAAAAAAAAAAAAAAAAAsJkFAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/29COIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAr5YBAN6yqf+7pSf43rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/uaMf+d2xp6MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyrhUAAAAAAC7pil73rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7miH38AAAAAxrJDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADi150b2K6T4N6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7mjI5zUxHAaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOnftwAAAAAAAAAAAN6yqf/esqn/3rKp/7egG+e2nxf/uKAk/7mjIvPesqn/3rKp/7agGEAAAAAAAAAAANnOjAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA///////wD///gAP//gAAf/wAAD/4AAAf8AAAD+AAAAfgAAAHwA/wA8f//+OP///xj///8Y////CP///xh///4IP//8CD///Bgf//gID//wGAP/wBwB/4A8AP8APgAYAH4AAAB/AAAA/wAAAf+AAAH/8AAP//" diff --git a/helm/sheepdog/templates/deployment.yaml b/helm/sheepdog/templates/deployment.yaml index d56beb93f..b4673242c 100644 --- a/helm/sheepdog/templates/deployment.yaml +++ b/helm/sheepdog/templates/deployment.yaml @@ -205,7 +205,7 @@ spec: - name: GEN3_UWSGI_TIMEOUT value: "600" - name: DICTIONARY_URL - value: {{ include "sheepdog.dictionaryUrl" .}} + value: {{ .Values.global.dictionaryUrl }} {{- with .Values.indexdUrl }} - name: INDEX_CLIENT_HOST value: {{ . 
}} diff --git a/values.yaml b/values.yaml index e18a44cd7..4272c214d 100644 --- a/values.yaml +++ b/values.yaml @@ -3,12 +3,6 @@ global: hostname: localhost portalApp: pcdc dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json - # postgres: - # master: - # host: pcdc-postgresql - # username: postgres - # password: pcdc - # port: "5432" arborist: image: @@ -36,7 +30,7 @@ aws-es-proxy: #modify gen3 chart so that elasticsearch automaticcally starts elasticsearch: - enabled: false + enabled: true fence: FENCE_CONFIG: @@ -96,14 +90,14 @@ wts: repository: quay.io/cdis/workspace-token-service tag: 2023.08 -# amanuensis: -# image: -# repository: quay.io/pcdc/amanuensis -# tag: "test" - +amanuensis: + image: + repository: quay.io/pcdc/amanuensis + tag: "pcdc_dev_2023-09-06T16_36_49-05_00" +#todo address this error EnvVar.spec.template.spec.containers.env.value guppy: - enabled: true + enabled: true image: repository: quay.io/pcdc/guppy tag: 1.5.0 From d943d7dcd9b5b9a6d4eff1fbb644f711d1a04023 Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Fri, 29 Sep 2023 11:06:59 -0700 Subject: [PATCH 007/126] clean up --- helm/amanuensis/Chart.yaml | 9 ++++ helm/gen3/values.yaml | 2 +- values.yaml | 106 +++++++++++++++---------------------- 3 files changed, 54 insertions(+), 63 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index e9d67f8ab..1ee0699cc 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -22,3 +22,12 @@ version: 0.1.0 # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. appVersion: "1.16.0" + +dependencies: +- name: common + version: 0.1.7 + repository: file://../common +- name: postgresql + version: 11.9.13 + repository: "https://charts.bitnami.com/bitnami" + condition: postgres.separate diff --git a/helm/gen3/values.yaml b/helm/gen3/values.yaml index 83daa49c5..9c51a1ae7 100644 --- a/helm/gen3/values.yaml +++ b/helm/gen3/values.yaml @@ -107,7 +107,7 @@ aws-es-proxy: awsSecretAccessKey: "" elasticsearch: - enabled: false + enabled: true fence: # -- (bool) Whether to deploy the fence subchart. diff --git a/values.yaml b/values.yaml index 4272c214d..91dbb98b8 100644 --- a/values.yaml +++ b/values.yaml @@ -9,6 +9,11 @@ arborist: repository: quay.io/pcdc/arborist tag: 2023.08 +amanuensis: + image: + repository: quay.io/pcdc/amanuensis + tag: "pcdc_dev_2023-09-06T16_36_49-05_00" + ambassador: # -- (bool) Whether to deploy the ambassador subchart. enabled: false @@ -17,7 +22,6 @@ argo-wrapper: # -- (bool) Whether to deploy the argo-wrapper subchart. enabled: false - audit: # -- (bool) Whether to deploy the audit subchart. enabled: false @@ -28,7 +32,6 @@ aws-es-proxy: repository: abutaha/aws-es-proxy tag: 0.8 -#modify gen3 chart so that elasticsearch automaticcally starts elasticsearch: enabled: true @@ -42,6 +45,39 @@ fence: repository: quay.io/pcdc/fence tag: 1.12.2 +guppy: + enabled: true + image: + repository: quay.io/pcdc/guppy + tag: 1.5.0 + # -- (int) Only relevant if tireAccessLevel is set to "regular". + # The minimum amount of files unauthorized users can filter down to + tierAccessLimit: 1000 + + secrets: + # -- (string) AWS access key. + awsAccessKeyId: "test_key" + # -- (string) AWS secret access key. 
+ awsSecretAccessKey: "test_secrect_key" + + + + # -- (list) Elasticsearch index configurations + indices: + - index: dev_case + type: case + - index: dev_file + type: file + + # -- (string) The Elasticsearch configuration index + configIndex: dev_case-array-config + # -- (string) The field used for access control and authorization filters + authFilterField: auth_resource_path + # -- (bool) Whether or not to enable encryption for specified fields + enableEncryptWhitelist: true + # -- (string) A comma-separated list of fields to encrypt + encryptWhitelist: test1 + hatchery: enabled: false @@ -54,6 +90,11 @@ metadata: # -- (bool) Whether to deploy the metadata subchart. enabled: false +pcdcanalysistools: + image: + repository: quay.io/pcdc/pcdcanalysistools + tag: 1.8.4 + pidgin: # -- (bool) Whether to deploy the pidgin subchart. enabled: false @@ -88,63 +129,4 @@ sower: wts: image: repository: quay.io/cdis/workspace-token-service - tag: 2023.08 - -amanuensis: - image: - repository: quay.io/pcdc/amanuensis - tag: "pcdc_dev_2023-09-06T16_36_49-05_00" - -#todo address this error EnvVar.spec.template.spec.containers.env.value -guppy: - enabled: true - image: - repository: quay.io/pcdc/guppy - tag: 1.5.0 - # -- (int) Only relevant if tireAccessLevel is set to "regular". - # The minimum amount of files unauthorized users can filter down to - tierAccessLimit: 1000 - - secrets: - # -- (string) AWS access key. - awsAccessKeyId: "test_key" - # -- (string) AWS secret access key. - awsSecretAccessKey: "test_secrect_key" - - - - # -- (list) Elasticsearch index configurations - indices: - - index: dev_case - type: case - - index: dev_file - type: file - - # -- (string) The Elasticsearch configuration index - configIndex: dev_case-array-config - # -- (string) The field used for access control and authorization filters - authFilterField: auth_resource_path - # -- (bool) Whether or not to enable encryption for specified fields - enableEncryptWhitelist: true - # -- (string) A comma-separated list of fields to encrypt - encryptWhitelist: test1 - - - -pcdcanalysistools: - image: - repository: quay.io/pcdc/pcdcanalysistools - tag: 1.8.4 - -# fluentd: -# image: -# repository: fluent/fluentd-kubernetes-daemonset -# tag: v1.15.3-debian-cloudwatch-1.0 - - - - -# revproxy: -# image: -# repository: quay.io/cdis/nginx -# tag: 1.17.6-ctds-1.0.1 + tag: 2023.08 \ No newline at end of file From 0148e217ba4e2ae8d4bbb3ff86be32fea0126a14 Mon Sep 17 00:00:00 2001 From: Paul Murdoch Date: Mon, 2 Oct 2023 08:54:05 -0700 Subject: [PATCH 008/126] update revproxy tag --- values.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/values.yaml b/values.yaml index 91dbb98b8..dec3bfee1 100644 --- a/values.yaml +++ b/values.yaml @@ -116,6 +116,11 @@ portal: gitops: json: "" +revproxy: + image: + repository: quay.io/cdis/nginx + tag: 2023.09 + sheepdog: image: repository: quay.io/pcdc/sheepdog From 9f5b6985d33f1db02c746e883ea7f7e914573785 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 20 Mar 2024 16:11:03 -0700 Subject: [PATCH 009/126] add in user yaml --- helm/amanuensis/values.yaml | 18 +- helm/portal/templates/deployment.yaml | 2 +- values.yaml | 1040 ++++++++++++++++++++++++- 3 files changed, 1048 insertions(+), 12 deletions(-) diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index 2855ac6fc..3a916c739 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -282,16 +282,17 @@ AMANUENSIS_CONFIG: # Where amanuensis microservice is deployed # a standardized name unique to each 
app for service-to-service interaction # so the service receiving the request knows it came from another Gen3 service + SERVICE_NAME: 'amanuensis' # postgres db to connect to # connection url format: # postgresql://[user[:password]@][netloc][:port][/dbname] - # A URL-safe base64-encoded 32-byte key for encrypting keys in db # in python you can use the following script to generate one: # import base64 # import os # key = base64.urlsafe_b64encode(os.urandom(32)) # print(key) + ENCRYPTION_KEY: '' # Cross-service keys @@ -304,23 +305,22 @@ AMANUENSIS_CONFIG: # ////////////////////////////////////////////////////////////////////////////////////// # flask's debug setting # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) - DEBUG: true + DEBUG: false # if true, will automatically login a user with username "test" # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) - MOCK_AUTH: true + MOCK_AUTH: false # if true, will fake a successful login response from Google in /login/google # NOTE: this will also modify the behavior of /link/google endpoints # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) # will login as the username set in cookie DEV_LOGIN_COOKIE_NAME - MOCK_GOOGLE_AUTH: true - + MOCK_GOOGLE_AUTH: false # if true, will ignore anything configured in STORAGE_CREDENTIALS MOCK_STORAGE: true # allow OIDC traffic on http for development. By default it requires https. # # WARNING: ONLY set to true when amanuensis will be deployed in such a way that it will # ONLY receive traffic from internal clients and can safely use HTTP. - AUTHLIB_INSECURE_TRANSPORT: true + AUTHLIB_INSECURE_TRANSPORT: false # set if you want browsers to only send cookies with requests over HTTPS SESSION_COOKIE_SECURE: true @@ -442,15 +442,15 @@ AMANUENSIS_CONFIG: # url where indexd microservice is running (for signed urls primarily) # NOTE: Leaving as null will force fence to default to {{BASE_URL}}/index # example value: 'https://example.com/index' - INDEXD: null + INDEXD: http://indexd-service # this is the username which fence uses to make authenticated requests to indexd - INDEXD_USERNAME: 'amanuensis' + INDEXD_USERNAME: 'indexd_client' # this is the password which fence uses to make authenticated requests to indexd INDEXD_PASSWORD: '' # url where authz microservice is running - ARBORIST: null + ARBORIST: 'http://arborist-service' FENCE: 'http://fence-service' # ////////////////////////////////////////////////////////////////////////////////////// diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index dbc29f873..43ab2f401 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -60,7 +60,7 @@ spec: port: 80 initialDelaySeconds: 30 periodSeconds: 60 - timeoutSeconds: 30 + timeoutSeconds: 60 resources: {{- toYaml .Values.resources | nindent 12 }} ports: diff --git a/values.yaml b/values.yaml index dec3bfee1..13930e78b 100644 --- a/values.yaml +++ b/values.yaml @@ -37,13 +37,1049 @@ elasticsearch: fence: FENCE_CONFIG: - MOCK_GOOGLE_AUTH: true + DEBUG: true + MOCK_STORAGE: true + #fill in + AMANUENSIS_PUBLIC_KEY_PATH: + ENCRYPTION_KEY: + #uncomment and add user email to fake google login + # MOCK_GOOGLE_AUTH: true OPENID_CONNECT: google: - mock_default_user: 'test@example.com' + client_id: + client_secret: + # mock_default_user: 'test@example.com' + + image: repository: quay.io/pcdc/fence tag: 1.12.2 + USER_YAML: | + cloud_providers: {} + groups: {} + authz: + # policies automatically given to 
anyone, even if they haven't authenticated + anonymous_policies: ['open_data_reader', 'full_open_access'] + + # policies automatically given to authenticated users (in addition to their other + # policies) + all_users_policies: ['open_data_reader', 'authn_open_access'] + + user_project_to_resource: + QA: /programs/QA + DEV: /programs/DEV + test: /programs/QA/projects/test + jenkins: /programs/jnkns/projects/jenkins + jenkins2: /programs/jnkns/projects/jenkins2 + jnkns: /programs/jnkns + + policies: + # General Access + - id: 'workspace' + description: 'be able to use workspace' + resource_paths: ['/workspace'] + role_ids: ['workspace_user'] + - id: 'dashboard' + description: 'be able to use the commons dashboard' + resource_paths: ['/dashboard'] + role_ids: ['dashboard_user'] + - id: 'prometheus' + description: 'be able to use prometheus' + resource_paths: ['/prometheus'] + role_ids: ['prometheus_user'] + - id: 'ttyadmin' + description: 'be able to use the admin tty' + resource_paths: ['/ttyadmin'] + role_ids: ['ttyadmin_user'] + - id: 'mds_admin' + description: 'be able to use metadata service' + resource_paths: ['/mds_gateway'] + role_ids: ['mds_user'] + - id: 'data_upload' + description: 'upload raw data files to S3' + role_ids: ['file_uploader'] + resource_paths: ['/data_file'] + - description: be able to use sower job + id: sower + resource_paths: [/sower] + role_ids: [sower_user] + - id: 'mariner_admin' + description: 'full access to mariner API' + resource_paths: ['/mariner'] + role_ids: ['mariner_admin'] + - id: audit_reader + role_ids: + - audit_reader + resource_paths: + - /services/audit + - id: audit_login_reader + role_ids: + - audit_reader + resource_paths: + - /services/audit/login + - id: audit_presigned_url_reader + role_ids: + - audit_reader + resource_paths: + - /services/audit/presigned_url + - id: requestor_admin + role_ids: + - requestor_admin + resource_paths: + - /programs + - id: requestor_reader + role_ids: + - requestor_reader + resource_paths: + - /programs + - id: requestor_creator + role_ids: + - requestor_creator + resource_paths: + - /programs + - id: requestor_updater + role_ids: + - requestor_updater + resource_paths: + - /programs + - id: requestor_deleter + role_ids: + - requestor_deleter + resource_paths: + - /programs + # Data Access + + # All programs policy + - id: 'all_programs_reader' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: ['/programs'] + + # # example if need access to write to storage + # - id: 'programs.jnkns-storage_writer' + # description: '' + # role_ids: + # - 'storage_writer' + # resource_paths: ['/programs/jnkns'] + + - id: 'programs.jnkns-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/programs/jnkns' + - '/gen3/programs/jnkns' + + - id: 'programs.jnkns-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/jnkns' + - '/gen3/programs/jnkns' + + + - id: 'programs.QA-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/programs/QA' + - '/gen3/programs/QA' + + - id: 'programs.QA-admin-no-storage' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + resource_paths: + - '/programs/QA' + - '/gen3/programs/QA' + + - id: 'programs.QA-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/QA' + - 
'/gen3/programs/QA' + + - id: 'programs.DEV-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + - 'storage_writer' + resource_paths: + - '/programs/DEV' + - '/gen3/programs/DEV' + + - id: 'programs.DEV-storage_writer' + description: '' + role_ids: + - 'storage_writer' + resource_paths: ['/programs/DEV'] + + - id: 'programs.DEV-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/DEV' + - '/gen3/programs/DEV' + + - id: 'programs.test-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/programs/test' + - '/gen3/programs/test' + + - id: 'programs.test-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/test' + - '/gen3/programs/test' + + - id: 'abc-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/abc' + + - id: 'gen3-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/gen3' + + - id: 'gen3-hmb-researcher' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + - '/gen3' + + - id: 'abc.programs.test_program.projects.test_project1-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/abc/programs/test_program/projects/test_project1' + + - id: 'abc.programs.test_program.projects.test_project2-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/abc/programs/test_program/projects/test_project2' + + - id: 'abc.programs.test_program2.projects.test_project3-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/abc/programs/test_program2/projects/test_project3' + + # Open data policies + - id: 'authn_open_access' + resource_paths: ['/programs/open/projects/authnRequired'] + description: '' + role_ids: + - 'reader' + - 'storage_reader' + - id: 'full_open_access' + resource_paths: ['/programs/open/projects/1000G'] + description: '' + role_ids: + - 'reader' + - 'storage_reader' + - id: 'open_data_reader' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: ['/open'] + - id: 'open_data_admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_writer' + - 'storage_reader' + resource_paths: ['/open'] + + # Consent Code Policies + - id: 'not-for-profit-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NPU' + + - id: 'publication-required-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/PUB' + + - id: 'gru-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + + - id: 'gru-cc-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + + - id: 'hmb-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + + - id: 'poa-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - 
'/consents/GRU_CC' + - '/consents/POA' + + - id: 'ds-lung-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + - '/consents/DS_LungDisease' + + - id: 'ds-chronic-obstructive-pulmonary-disease-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + - '/consents/DS_ChronicObstructivePulmonaryDisease' + + - id: 'services.sheepdog-admin' + description: 'CRUD access to programs and projects' + role_ids: + - 'sheepdog_admin' + resource_paths: + - '/services/sheepdog/submission/program' + - '/services/sheepdog/submission/project' + + # indexd + - id: 'indexd_admin' + description: 'full access to indexd API' + role_ids: + - 'indexd_admin' + resource_paths: + - '/programs' + - '/services/indexd/admin' + # # TODO resource path '/' is not valid right now in arborist, trying to decide + # # how to handle all resources + # - id: 'indexd_admin' + # description: '' + # role_ids: + # - 'indexd_record_creator' + # - 'indexd_record_reader' + # - 'indexd_record_updater' + # - 'indexd_delete_record' + # - 'indexd_storage_reader' + # - 'indexd_storage_writer' + # resource_paths: ['/'] + # - id: 'indexd_record_reader' + # description: '' + # role_ids: + # - 'indexd_record_reader' + # resource_paths: ['/'] + # - id: 'indexd_record_editor' + # description: '' + # role_ids: + # - 'indexd_record_creator' + # - 'indexd_record_reader' + # - 'indexd_record_updater' + # - 'indexd_delete_record' + # resource_paths: ['/'] + # - id: 'indexd_storage_reader' + # description: '' + # role_ids: + # - 'indexd_storage_reader' + # resource_paths: ['/'] + # - id: 'indexd_storage_editor' + # description: '' + # role_ids: + # - 'indexd_storage_reader' + # - 'indexd_storage_writer' + # resource_paths: ['/'] + + # argo + - id: argo + description: be able to use argo + resource_paths: [/argo] + role_ids: [argo_user] + + #PCDC specific + - id: 'services.amanuensis-admin' + description: 'admin access to amanuensis' + role_ids: + - 'amanuensis_admin' + resource_paths: + - '/services/amanuensis' + - id: analysis + description: be able to use analysis tool service + resource_paths: + - /analysis + role_ids: + - analysis_user + - id: privacy_policy + description: User agreed on the privacy policy + resource_paths: + - /privacy + role_ids: + - reader + - id: login_no_access + role_ids: + - reader + resource_paths: + - /portal + - id: 'data_admin' + description: 'policy test, should write a policy per resource and assign to user in order to avoid duplicating policies' + role_ids: + - admin + resource_paths: + - /programs + - /programs/pcdc + resources: + # General Access + - name: 'data_file' + description: 'data files, stored in S3' + - name: 'dashboard' + description: 'commons /dashboard' + - name: 'mds_gateway' + description: 'commons /mds-admin' + - name: 'prometheus' + description: 'commons /prometheus and /grafana' + - name: 'ttyadmin' + description: 'commons /ttyadmin' + - name: 'workspace' + description: jupyter notebooks + - name: "sower" + description: 'sower resource' + - name: 'mariner' + description: 'workflow execution service' + - name: argo + #PCDC + - name: analysis + description: analysis tool service + - name: portal + description: data portal service + - name: privacy + description: User privacy policy + # OLD Data + - name: 'programs' + subresources: + #PCDC + - name: pcdc + - name: 'open' + subresources: + - name: 
'projects' + subresources: + - name: '1000G' + - name: 'authnRequired' + - name: 'QA' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'DEV' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'jnkns' + subresources: + - name: 'projects' + subresources: + - name: 'jenkins' + - name: 'jenkins2' + - name: 'test' + subresources: + - name: 'projects' + subresources: + - name: 'test' + + # NEW Data WITH PREFIX + - name: 'gen3' + subresources: + - name: 'programs' + subresources: + - name: 'QA' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'DEV' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'jnkns' + subresources: + - name: 'projects' + subresources: + - name: 'jenkins' + - name: 'jenkins2' + - name: 'test' + subresources: + - name: 'projects' + subresources: + - name: 'test' + + # consents obtained from DUO and NIH + # https://github.com/EBISPOT/DUO + # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4721915/ + - name: 'consents' + subresources: + - name: 'NRES' + description: 'no restriction' + - name: 'GRU' + description: 'general research use' + - name: 'GRU_CC' + description: 'general research use and clinical care' + - name: 'HMB' + description: 'health/medical/biomedical research' + - name: 'POA' + description: 'population origins or ancestry research' + - name: 'NMDS' + description: 'no general methods research' + - name: 'NPU' + description: 'not-for-profit use only' + - name: 'PUB' + description: 'publication required' + - name: 'DS_LungDisease' + description: 'disease-specific research for lung disease' + - name: 'DS_ChronicObstructivePulmonaryDisease' + description: 'disease-specific research for chronic obstructive pulmonary disease' + + - name: 'abc' + subresources: + - name: 'programs' + subresources: + - name: 'foo' + subresources: + - name: 'projects' + subresources: + - name: 'bar' + - name: 'test_program' + subresources: + - name: 'projects' + subresources: + - name: 'test_project1' + - name: 'test_project2' + - name: 'test_program2' + subresources: + - name: 'projects' + subresources: + - name: 'test_project3' + + + # "Sheepdog admin" resources + - name: 'services' + subresources: + - name: 'sheepdog' + subresources: + - name: 'submission' + subresources: + - name: 'program' + - name: 'project' + #PCDC + - name: 'amanuensis' + - name: 'indexd' + subresources: + - name: 'admin' + - name: 'bundles' + - name: audit + subresources: + - name: presigned_url + - name: login + + - name: 'open' + + # action/methods: + # create, read, update, delete, read-storage, write-storage, + # file_upload, access + roles: + # General Access + - id: 'file_uploader' + description: 'can upload data files' + permissions: + - id: 'file_upload' + action: + service: '*' + method: 'file_upload' + - id: 'workspace_user' + permissions: + - id: 'workspace_access' + action: + service: 'jupyterhub' + method: 'access' + - id: 'dashboard_user' + permissions: + - id: 'dashboard_access' + action: + service: 'dashboard' + method: 'access' + - id: 'mds_user' + permissions: + - id: 'mds_access' + action: + service: 'mds_gateway' + method: 'access' + - id: 'prometheus_user' + permissions: + - id: 'prometheus_access' + action: + service: 'prometheus' + method: 'access' + - id: 'ttyadmin_user' + permissions: + - id: 'ttyadmin_access' + action: + service: 'ttyadmin' + method: 'access' + - id: 'sower_user' + permissions: + - id: 'sower_access' + action: + service: 'job' + method: 'access' + - id: 
'mariner_admin' + permissions: + - id: 'mariner_access' + action: + service: 'mariner' + method: 'access' + - id: 'audit_reader' + permissions: + - id: 'audit_reader_action' + action: + service: 'audit' + method: 'read' + - id: 'analysis_user' + permissions: + - action: {method: 'access', service: 'analysis'} + id: 'analysis_access' + # All services + - id: 'admin' + description: '' + permissions: + - id: 'admin' + action: + service: '*' + method: '*' + - id: 'creator' + description: '' + permissions: + - id: 'creator' + action: + service: '*' + method: 'create' + - id: 'reader' + description: '' + permissions: + - id: 'reader' + action: + service: '*' + method: 'read' + - id: 'updater' + description: '' + permissions: + - id: 'updater' + action: + service: '*' + method: 'update' + - id: 'deleter' + description: '' + permissions: + - id: 'deleter' + action: + service: '*' + method: 'delete' + - id: 'storage_writer' + description: '' + permissions: + - id: 'storage_writer' + action: + service: '*' + method: 'write-storage' + - id: 'storage_reader' + description: '' + permissions: + - id: 'storage_reader' + action: + service: '*' + method: 'read-storage' + + + # Sheepdog admin role + - id: 'sheepdog_admin' + description: 'sheepdog admin role for program project crud' + permissions: + - id: 'sheepdog_admin_action' + action: + service: 'sheepdog' + method: '*' + + + # indexd + - id: 'indexd_admin' + # this only works if indexd.arborist is enabled in manifest! + description: 'full access to indexd API' + permissions: + - id: 'indexd_admin' + action: + service: 'indexd' + method: '*' + - id: 'indexd_record_creator' + description: '' + permissions: + - id: 'indexd_record_creator' + action: + service: 'indexd' + method: 'create' + - id: 'indexd_record_reader' + description: '' + permissions: + - id: 'indexd_record_reader' + action: + service: 'indexd' + method: 'read' + - id: 'indexd_record_updater' + description: '' + permissions: + - id: 'indexd_record_updater' + action: + service: 'indexd' + method: 'update' + - id: 'indexd_delete_record' + description: '' + permissions: + - id: 'indexd_delete_record' + action: + service: 'indexd' + method: 'delete' + - id: 'indexd_storage_reader' + description: '' + permissions: + - id: 'indexd_storage_reader' + action: + service: 'indexd' + method: 'read-storage' + - id: 'indexd_storage_writer' + description: '' + permissions: + - id: 'indexd_storage_writer' + action: + service: 'indexd' + method: 'write-storage' + + # arborist + - id: 'arborist_creator' + description: '' + permissions: + - id: 'arborist_creator' + action: + service: 'arborist' + method: 'create' + - id: 'arborist_reader' + description: '' + permissions: + - id: 'arborist_reader' + action: + service: 'arborist' + method: 'read' + - id: 'arborist_updater' + description: '' + permissions: + - id: 'arborist_updater' + action: + service: 'arborist' + method: 'update' + - id: 'arborist_deleter' + description: '' + permissions: + - id: 'arborist_deleter' + action: + service: 'arborist' + method: 'delete' + + # requestor + - id: requestor_admin + permissions: + - id: requestor_admin_action + action: + service: requestor + method: '*' + - id: requestor_reader + permissions: + - id: requestor_reader_action + action: + service: requestor + method: read + - id: requestor_creator + permissions: + - id: requestor_creator_action + action: + service: requestor + method: create + - id: requestor_updater + permissions: + - id: requestor_updater_action + action: + service: requestor + method: update + - id: 
requestor_deleter + permissions: + - id: requestor_deleter_action + action: + service: requestor + method: delete + # argo + - id: argo_user + permissions: + - id: argo_access + action: + service: argo + method: access + #PCDC specific + #amanuensis + - id: 'amanuensis_admin' + description: 'can do admin work on project/data request' + permissions: + - id: 'amanuensis_admin_action' + action: + service: 'amanuensis' + method: '*' + clients: + basic-test-client: + policies: + - abc-admin + - gen3-admin + basic-test-abc-client: + policies: + - abc-admin + wts: + policies: + - all_programs_reader + - workspace + + users: + ### BEGIN INTERNS SECTION ### + ### END INTERNS SECTION ### + qureshi@uchicago.edu: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + pmurdoch@uchicago.edu: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - services.amanuensis-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + - data_admin + - analysis + - privacy_policy + - login_no_access + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + graglia01@gmail.com: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - services.amanuensis-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + - data_admin + - analysis + - privacy_policy + - login_no_access + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + furner.brian@gmail.com: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - services.amanuensis-admin + - programs.QA-admin + - programs.test-admin + - 
programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + - data_admin + - analysis + - privacy_policy + - login_no_access + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] guppy: enabled: true From 871b88e54091a6c52466f74ca56ecb02351d217f Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 6 May 2024 13:55:34 -0700 Subject: [PATCH 010/126] fix gen3-helm --- .gitignore | 8 +- values.yaml => default-values.yaml | 178 ++++++++++-------- helm/amanuensis/templates/_helpers.tpl | 20 +- .../templates/amanuensis-db-migrate-job.yaml | 100 ++++++++++ helm/common/templates/_postgres_secrets.tpl | 2 +- helm/fence/values.yaml | 5 - helm/gen3/templates/global-manifest.yaml | 1 + helm/pcdcanalysistools/templates/_helpers.tpl | 20 +- helm/pcdcanalysistools/templates/service.yaml | 2 +- helm/peregrine/peregrine-secret/settings.py | 119 ++++++------ helm/peregrine/templates/deployment.yaml | 6 +- helm/peregrine/templates/peregrine-creds.yaml | 19 ++ helm/peregrine/values.yaml | 3 + .../pcdcanalysistools-service.conf | 2 +- helm/revproxy/nginx/nginx.conf | 17 +- helm/sheepdog/sheepdog-secret/settings.py | 56 ++++++ helm/sheepdog/sheepdog-secret/wsgi.py | 82 -------- helm/sheepdog/templates/deployment.yaml | 14 +- helm/sheepdog/templates/sheepdog-creds.yaml | 19 ++ helm/sheepdog/values.yaml | 8 +- pcdc_data/generate_data.sh | 23 +++ pcdc_data/load_elasticsearch.sh | 82 ++++++++ pcdc_data/load_gen3_scripts.sh | 38 ++++ pcdc_data/load_graph_db.sh | 48 +++++ pcdc_data/run_all.sh | 37 ++++ tools/clear_elasticsearch.sh | 16 ++ tools/connect_to_db.sh | 15 ++ tools/connect_to_pod.sh | 21 +++ tools/job.sh | 22 +++ tools/load_data.sh | 8 + tools/logs.sh | 33 ++++ tools/pcdc | 66 +++++++ tools/restart_pod.sh | 21 +++ tools/roll.sh | 50 +++++ 34 files changed, 905 insertions(+), 256 deletions(-) rename values.yaml => default-values.yaml (89%) create mode 100644 helm/amanuensis/templates/amanuensis-db-migrate-job.yaml create mode 100644 helm/peregrine/templates/peregrine-creds.yaml create mode 100644 helm/sheepdog/sheepdog-secret/settings.py delete mode 100644 helm/sheepdog/sheepdog-secret/wsgi.py create mode 100644 helm/sheepdog/templates/sheepdog-creds.yaml create mode 100755 pcdc_data/generate_data.sh create mode 100755 pcdc_data/load_elasticsearch.sh create mode 100755 pcdc_data/load_gen3_scripts.sh create mode 100755 pcdc_data/load_graph_db.sh create mode 100755 pcdc_data/run_all.sh create mode 100755 tools/clear_elasticsearch.sh create mode 100755 tools/connect_to_db.sh create mode 100755 tools/connect_to_pod.sh create mode 100755 tools/job.sh create mode 100755 tools/load_data.sh create mode 100755 tools/logs.sh create mode 100755 tools/pcdc create mode 100755 tools/restart_pod.sh create mode 100755 tools/roll.sh diff --git a/.gitignore b/.gitignore index df2d04fe8..42aec08b6 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,10 @@ postgres.txt notes/ Chart.lock .DS_Store -_sample-*/ \ No newline at end of file +_sample-*/ +secret-values.yaml +.env +credentials.json +CA/ +temp.yaml +values.yaml diff --git a/values.yaml 
b/default-values.yaml similarity index 89% rename from values.yaml rename to default-values.yaml index 13930e78b..5f4e25502 100644 --- a/values.yaml +++ b/default-values.yaml @@ -3,6 +3,56 @@ global: hostname: localhost portalApp: pcdc dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + authz_entity_name: "subject" + tls: + cert: | + -----BEGIN CERTIFICATE----- + MIIDDTCCAfWgAwIBAgIQcMmHCSPIuchREDNi1OpQ5DANBgkqhkiG9w0BAQsFADAP + MQ0wCwYDVQQDEwRnZW4zMB4XDTI0MDMyNTIyMDgwNFoXDTI1MDMyNTIyMDgwNFow + FDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB + CgKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u6bgbztSg + 9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0bhfGlwmt/ + gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lHzPefEQoU + p4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e8rRg5KWA + N7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KKriN+7492 + 38Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABo2AwXjAOBgNVHQ8BAf8EBAMCBaAw + HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHwYD + VR0jBBgwFoAUIK7MtOCIs/DygzZ1/vR3ieAwplAwDQYJKoZIhvcNAQELBQADggEB + AIWgFxpLfZ9LUc90qdiC2yHaLKyTN/LMdkUjw82ULVLYJ0ot0JJaJvO4iDFe/Ra9 + t13LUhcE+W4ChentUHvwYKO5zghf4UtiryM8/wqmcZ03xyswdVaKuk5Ov39/9sTJ + 6rfzMpf3mJZDO6JYC475TCQQ3hKAUUzOiFg41CMeqAy9vn0zgBk58IzZmruvdn43 + YH6N/ooqVTj3CnkmVkWoB4zBjDzX9DuxpYvqI3seD7qLtXK2cm2X+Pqv90UoPsB/ + XegALjODFpTbN5Scvbpb3npXEKbvR7X9+xy7BbVYD2K0FQ9+S1UTU8Rz7Dh9SDHM + Ixy5W9o6gVFhB5mxceOxKNc= + -----END CERTIFICATE----- + key: | + -----BEGIN RSA PRIVATE KEY----- + MIIEogIBAAKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u + 6bgbztSg9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0b + hfGlwmt/gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lH + zPefEQoUp4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e + 8rRg5KWAN7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KK + riN+749238Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABAoIBAG+AhfWcZncSuHjE + 1SfDCRRfeBtexqS6ygrCAn9UPDfRFWW1tfkuwP00kQekLlKCT03H9dvDPYmDIkvN + 1v23dkxjwn3qY5n4lbT9L2FXky6m1zfCgLEKzY5GcHA85QwVTPKYhw6NMTPwRJ2T + 4uDeJQKVih9fkN4Qoua2TnXvmyzNU49ffgFMJ0Ec7vNmS7MCUtlGec1Y0xKgTflt + yqhChpG2MBxdX8tLNgSC+lgRBZSzRaP/0oGZuV3FQ7W4fuXLNN8CdhSzHbVlbK+D + CO1f6nqZ8PZKJ/7SGwB2Q05EqscNAF3tl1dsGpnLqOLpnqJ2+f/H4W6/uB2tAILd + ySaC53kCgYEAwOHrNulo7HLgSXEs4/Ta9nLPoZxpDxkNKCRENDJCsoakQ2O33aL4 + mrHdSmxGqecpoCvkifx/ZCN073ZykVIoSY70+N7f79EytGjkAzDxEAnYMKSU7SSJ + TGA+c8Juqh6uvbMuJy/ZiQE6DZsweqhxopov7xSd89RIvNaBZdXq3QcCgYEA1fWJ + VHCEeQGl6eMtSWOfiADUnmOG70xlvmpzlD18pTTCIF7V1rFaAXjJl0ldI3mASJy/ + usiHZq54bUWcvof8DjI7YJ0OS8e7pmUZK9+O9fGTLIf8TIz6qq0PfERk+SyWGdAo + Z8HQMJBKWX809KPkJ9isd62wfREHVazfljxdL3sCgYBwxKTsWdKKSy9uQMjqDcHm + zIEwD24s8YyLp4hoq+nqzmVDMQ3SevG2H78tP9ighRIFHyRiuEkSlthLGIkrBUmg + mAAJcOSkJT7r01dbtkV6BwdqiQ65Bt9u0+Yvb8GbnIy1RAj7yDH6s8jpI45YaBrn + 4hWcRgWDBN3x6ceFbmf+CQKBgA5vwNJnvSiFCfLcF0Qqaqs8kxwUzxf6aasNd7r6 + 4xwqkSap/3e7A72xrrh8hMJOAm/j07QAr9In14xX9BmPB1zV2tfMARjv4yN5Ete4 + /+ZsZGfOzSFFKey2PKM/4ihF7+LR/sfxdeCw+7NKOAKBxHVD029H0u69ZWdMgNGc + RRVdAoGAFH7huA61ylOGh/W6IMU0wvJqg9SeT53JoZTr++0MS+0sdYoRGrq4RzSZ + bXKuvqZaSrXMRB9JZ72FfpjwZhDPZtNOXJV00K4yjZIui6h+TPsDk4lnxVSPYMpP + My/zrtJTCPM+Gqa6mhYTz4fyITv7igxqyECakrCa/Ct0SVDZbSI= + -----END RSA PRIVATE KEY----- arborist: image: @@ -12,48 +62,22 @@ arborist: amanuensis: image: repository: quay.io/pcdc/amanuensis - tag: "pcdc_dev_2023-09-06T16_36_49-05_00" - -ambassador: - # -- (bool) Whether to deploy the ambassador subchart. 
- enabled: false - -argo-wrapper: - # -- (bool) Whether to deploy the argo-wrapper subchart. - enabled: false - -audit: - # -- (bool) Whether to deploy the audit subchart. - enabled: false - -aws-es-proxy: - enabled: true - image: - repository: abutaha/aws-es-proxy - tag: 0.8 - -elasticsearch: - enabled: true + tag: "2.16.1" fence: FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true #fill in - AMANUENSIS_PUBLIC_KEY_PATH: - ENCRYPTION_KEY: - #uncomment and add user email to fake google login - # MOCK_GOOGLE_AUTH: true - OPENID_CONNECT: - google: - client_id: - client_secret: - # mock_default_user: 'test@example.com' + AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' + MOCK_GOOGLE_AUTH: true + mock_default_user: 'test@example.com' image: repository: quay.io/pcdc/fence - tag: 1.12.2 + tag: "helm-test" + pullPolicy: Always USER_YAML: | cloud_providers: {} groups: {} @@ -1084,71 +1108,32 @@ fence: guppy: enabled: true image: - repository: quay.io/pcdc/guppy - tag: 1.5.0 - # -- (int) Only relevant if tireAccessLevel is set to "regular". - # The minimum amount of files unauthorized users can filter down to - tierAccessLimit: 1000 - - secrets: - # -- (string) AWS access key. - awsAccessKeyId: "test_key" - # -- (string) AWS secret access key. - awsSecretAccessKey: "test_secrect_key" - - - - # -- (list) Elasticsearch index configurations - indices: - - index: dev_case - type: case - - index: dev_file - type: file - - # -- (string) The Elasticsearch configuration index - configIndex: dev_case-array-config - # -- (string) The field used for access control and authorization filters - authFilterField: auth_resource_path - # -- (bool) Whether or not to enable encryption for specified fields - enableEncryptWhitelist: true - # -- (string) A comma-separated list of fields to encrypt - encryptWhitelist: test1 - -hatchery: - enabled: false + repository: quay.io/pcdc/guppy + tag: 1.8.0 + authFilterField: "auth_resource_path" manifestservice: image: repository: quay.io/cdis/manifestservice tag: 2023.08 -metadata: - # -- (bool) Whether to deploy the metadata subchart. - enabled: false - pcdcanalysistools: image: repository: quay.io/pcdc/pcdcanalysistools tag: 1.8.4 -pidgin: - # -- (bool) Whether to deploy the pidgin subchart. - enabled: false - peregrine: image: repository: quay.io/pcdc/peregrine - tag: "helm-test" + tag: "1.3.5" portal: image: repository: quay.io/pcdc/windmill - tag: 1.19.0 - pullPolicy: Always + tag: 1.25.0 resources: requests: - cpu: 0.2 - memory: 500Mi + cpu: 1.0 gitops: json: "" @@ -1160,14 +1145,47 @@ revproxy: sheepdog: image: repository: quay.io/pcdc/sheepdog - tag: "helm-test" + tag: "1.5.6" sower: image: repository: quay.io/cdis/sower - tag: 2023.08 + tag: 2024.04 wts: image: repository: quay.io/cdis/workspace-token-service - tag: 2023.08 \ No newline at end of file + tag: 2024.04 + +######################################################################################## +# DISABLED SERVICES # +######################################################################################## + +ambassador: + # -- (bool) Whether to deploy the ambassador subchart. + enabled: false + +argo-wrapper: + # -- (bool) Whether to deploy the argo-wrapper subchart. + enabled: false + +audit: + # -- (bool) Whether to deploy the audit subchart. + enabled: false + +aws-es-proxy: + enabled: false + +metadata: + # -- (bool) Whether to deploy the metadata subchart. + enabled: false + +pidgin: + # -- (bool) Whether to deploy the pidgin subchart. 
+ enabled: false + +indexd: + enabled: false + +hatchery: + enabled: false \ No newline at end of file diff --git a/helm/amanuensis/templates/_helpers.tpl b/helm/amanuensis/templates/_helpers.tpl index b46ed2a9f..d576d4089 100644 --- a/helm/amanuensis/templates/_helpers.tpl +++ b/helm/amanuensis/templates/_helpers.tpl @@ -34,20 +34,26 @@ Create chart name and version as used by the chart label. Common labels */}} {{- define "amanuensis.labels" -}} -helm.sh/chart: {{ include "amanuensis.chart" . }} -{{ include "amanuensis.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- if .Values.commonLabels }} + {{- with .Values.commonLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.commonLabels" .)}} {{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "amanuensis.selectorLabels" -}} -app.kubernetes.io/name: {{ include "amanuensis.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.selectorLabels }} + {{- with .Values.selectorLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.selectorLabels" .)}} +{{- end }} {{- end }} {{/* diff --git a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml new file mode 100644 index 000000000..9831fce14 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: amanuensis-db-migrate +spec: + template: + metadata: + labels: + app: gen3job + spec: + automountServiceAccountToken: false + volumes: + - name: yaml-merge + configMap: + name: "amanuensis-yaml-merge" + optional: true + - name: config-volume + secret: + secretName: "amanuensis-config" + - name: amanuensis-volume + secret: + secretName: "amanuensis-creds" + - name: tmp-pod + emptyDir: {} + containers: + - name: amanuensis + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: PGHOST + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: host + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: username + optional: false + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: password + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: database + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: dbcreated + optional: false + - name: DB + value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) + - name: AMANUENSIS_DB + value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) + - name: PYTHONPATH + value: /var/www/amanuensis + - name: AMANUENSIS_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-amanuensis + key: amanuensis-config-public.yaml + optional: true + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/amanuensis/yaml_merge.py" + subPath: yaml_merge.py + - name: "amanuensis-volume" + readOnly: true + mountPath: "/var/www/amanuensis/creds.json" + subPath: creds.json + - mountPath: /tmp/pod + name: tmp-pod + command: ["/bin/bash"] + args: + - "-c" + - | + # echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" 
> "/var/www/amanuensis/amanuensis-config-public.yaml" + # python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml + cd /amanuensis + fence-create migrate + if [[ $? != 0 ]]; then + echo "WARNING: non zero exit code: $?" + fi + touch /tmp/pod/completed + restartPolicy: OnFailure \ No newline at end of file diff --git a/helm/common/templates/_postgres_secrets.tpl b/helm/common/templates/_postgres_secrets.tpl index c256b10f7..4c086af1d 100644 --- a/helm/common/templates/_postgres_secrets.tpl +++ b/helm/common/templates/_postgres_secrets.tpl @@ -17,7 +17,7 @@ */}} {{- define "gen3.service-postgres" -}} - {{- $chartName := default "" .context.Chart.Name }} + {{- $chartName := default .context.Chart.Name $.service }} {{- $valuesPostgres := get .context.Values.postgres .key }} {{- $localSecretPass := "" }} {{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace (printf "%s-%s" $chartName "dbcreds")).data }} diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index f03523464..df3e8ad50 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -284,11 +284,6 @@ env: optional: false - name: DB value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) - - name: INDEXD_PASSWORD - valueFrom: - secretKeyRef: - name: indexd-service-creds - key: fence - name: gen3Env valueFrom: configMapKeyRef: diff --git a/helm/gen3/templates/global-manifest.yaml b/helm/gen3/templates/global-manifest.yaml index 945088d59..87b0892c7 100644 --- a/helm/gen3/templates/global-manifest.yaml +++ b/helm/gen3/templates/global-manifest.yaml @@ -14,6 +14,7 @@ data: "netpolicy": {{ .Values.global.netPolicy | quote }} "dispatcher_job_num": {{ .Values.global.dispatcherJobNum | quote }} "dd_enabled": {{ .Values.global.ddEnabled | quote }} + "authz_entity_name": {{.Values.global.authz_entity_name}} {{- with .Values.global.origins_allow_credentials }} "origins_allow_credentials": {{ . | toJson | quote }} {{- end -}} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/_helpers.tpl b/helm/pcdcanalysistools/templates/_helpers.tpl index e2d7ec287..6b4035d8d 100644 --- a/helm/pcdcanalysistools/templates/_helpers.tpl +++ b/helm/pcdcanalysistools/templates/_helpers.tpl @@ -34,20 +34,26 @@ Create chart name and version as used by the chart label. Common labels */}} {{- define "pcdcanalysistools.labels" -}} -helm.sh/chart: {{ include "pcdcanalysistools.chart" . }} -{{ include "pcdcanalysistools.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- if .Values.commonLabels }} + {{- with .Values.commonLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.commonLabels" .)}} {{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "pcdcanalysistools.selectorLabels" -}} -app.kubernetes.io/name: {{ include "pcdcanalysistools.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.selectorLabels }} + {{- with .Values.selectorLabels }} + {{- toYaml . 
}} + {{- end }} +{{- else }} + {{- (include "common.selectorLabels" .)}} +{{- end }} {{- end }} {{/* diff --git a/helm/pcdcanalysistools/templates/service.yaml b/helm/pcdcanalysistools/templates/service.yaml index 721f830df..8fff1ae27 100644 --- a/helm/pcdcanalysistools/templates/service.yaml +++ b/helm/pcdcanalysistools/templates/service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: "pcdcanalysis-service" + name: "pcdcanalysistools-service" labels: {{- include "pcdcanalysistools.labels" . | nindent 4 }} spec: diff --git a/helm/peregrine/peregrine-secret/settings.py b/helm/peregrine/peregrine-secret/settings.py index 1a623a907..bfa26abb8 100644 --- a/helm/peregrine/peregrine-secret/settings.py +++ b/helm/peregrine/peregrine-secret/settings.py @@ -1,87 +1,84 @@ -##################################################### -# DO NOT CHANGE THIS FILE # -# config updates should be done in the service code # -##################################################### - from peregrine.api import app, app_init from os import environ -# import config_helper +import confighelper + +APP_NAME = "peregrine" + + +def load_json(file_name): + return confighelper.load_json(file_name, APP_NAME) -APP_NAME='peregrine' -# def load_json(file_name): -# return config_helper.load_json(file_name, APP_NAME) -# conf_data = load_json('creds.json') +conf_data = load_json("creds.json") config = app.config -# config["AUTH"] = 'https://auth.service.consul:5000/v3/' -# config["AUTH_ADMIN_CREDS"] = None -# config["INTERNAL_AUTH"] = None +config["AUTH"] = "https://auth.service.consul:5000/v3/" +config["AUTH_ADMIN_CREDS"] = None +config["INTERNAL_AUTH"] = None # ARBORIST deprecated, replaced by ARBORIST_URL # ARBORIST_URL is initialized in app_init() directly -# config["ARBORIST"] = "http://arborist-service/" +config["ARBORIST"] = "http://arborist-service/" -config['INDEX_CLIENT'] = { - 'host': environ.get('INDEX_CLIENT_HOST') or 'http://indexd-service', - 'version': 'v0', - 'auth': ('gdcapi', environ.get( "PGHOST") ), +# Signpost: deprecated, replaced by index client. 
+config["SIGNPOST"] = { + "host": environ.get("SIGNPOST_HOST") or "http://indexd-service", + "version": "v0", + "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), } -# config["FAKE_AUTH"] = environ.get( "FAKE_AUTH", False) +config["INDEX_CLIENT"] = { + "host": environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", + "version": "v0", + "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), +} +config["FAKE_AUTH"] = False config["PSQLGRAPH"] = { - 'host': environ.get( "PGHOST"), - 'user': environ.get( "PGUSER"), - 'password': environ.get( "PGPASSWORD"), - 'database': environ.get( "PGDB"), + "host": environ.get("PGHOST"), + "user": environ.get("PGUSER"), + "password": environ.get("PGPASSWORD"), + "database": environ.get("PGDB"), } -config['HMAC_ENCRYPTION_KEY'] = environ.get( "HMAC_ENCRYPTION_KEY") -config['FLASK_SECRET_KEY'] = environ.get( "FLASK_SECRET_KEY") - -fence_username = environ.get( "FENCE_DB_USER") -fence_password = environ.get( "FENCE_DB_PASS") -fence_host = environ.get( "FENCE_DB_HOST") -fence_database = environ.get( "FENCE_DB_DBNAME") -config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (fence_username, fence_password, fence_host, fence_database) +config["HMAC_ENCRYPTION_KEY"] = conf_data.get("hmac_key", "{{hmac_key}}") +config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") +config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (environ.get("FENCE_DB_USER"), environ.get("FENCE_DB_PASS"), environ.get("FENCE_DB_HOST"), environ.get("FENCE_DB_DBNAME")) -config['DICTIONARY_URL'] = environ.get('DICTIONARY_URL','https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json') +config["DICTIONARY_URL"] = environ.get( + "DICTIONARY_URL", + "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json", +) -# config['SUBMISSION'] = { -# 'bucket': conf_data.get( 'bagit_bucket', '' ) -# } +config["SUBMISSION"] = {"bucket": conf_data.get("bagit_bucket", "{{bagit_bucket}}")} -# config['STORAGE'] = { -# "s3": -# { -# "access_key": conf_data.get( 's3_access', '' ), -# 'secret_key': conf_data.get( 's3_secret', '' ) -# } -# } +config["STORAGE"] = { + "s3": { + "access_key": conf_data.get("s3_access", "{{s3_access}}"), + "secret_key": conf_data.get("s3_secret", "{{s3_secret}}"), + } +} -hostname = environ.get("CONF_HOSTNAME") -config['OIDC_ISSUER'] = 'https://%s/user' % hostname +config["OIDC_ISSUER"] = "https://%s/user" % conf_data["hostname"] -config['OAUTH2'] = { - 'client_id': "conf_data.get('oauth2_client_id', '{{oauth2_client_id}}')", - 'client_secret': "conf_data.get('oauth2_client_secret', '{{oauth2_client_secret}}')", - 'api_base_url': 'https://%s/user/' % hostname, - 'authorize_url': 'https://%s/user/oauth2/authorize' % hostname, - 'access_token_url': 'https://%s/user/oauth2/token' % hostname, - 'refresh_token_url': 'https://%s/user/oauth2/token' % hostname, - 'client_kwargs': { - 'redirect_uri': 'https://%s/api/v0/oauth2/authorize' % hostname, - 'scope': 'openid data user', +config["OAUTH2"] = { + "client_id": conf_data.get("oauth2_client_id", "{{oauth2_client_id}}"), + "client_secret": conf_data.get("oauth2_client_secret", "{{oauth2_client_secret}}"), + "api_base_url": "https://%s/user/" % conf_data["hostname"], + "authorize_url": "https://%s/user/oauth2/authorize" % conf_data["hostname"], + "access_token_url": "https://%s/user/oauth2/token" % conf_data["hostname"], + "refresh_token_url": "https://%s/user/oauth2/token" % 
conf_data["hostname"], + "client_kwargs": { + "redirect_uri": "https://%s/api/v0/oauth2/authorize" % conf_data["hostname"], + "scope": "openid data user", }, # deprecated key values, should be removed after all commons use new oidc - 'internal_oauth_provider': 'http://fence-service/oauth2/', - 'oauth_provider': 'https://%s/user/oauth2/' % hostname, - 'redirect_uri': 'https://%s/api/v0/oauth2/authorize' % hostname + "internal_oauth_provider": "http://fence-service/oauth2/", + "oauth_provider": "https://%s/user/oauth2/" % conf_data["hostname"], + "redirect_uri": "https://%s/api/v0/oauth2/authorize" % conf_data["hostname"], } -config['USER_API'] = environ.get('FENCE_URL') or 'http://fence-service/' +config["USER_API"] = "http://fence-service/" # for use by authutils # use the USER_API URL instead of the public issuer URL to accquire JWT keys -config['FORCE_ISSUER'] = True -print(config) +config["FORCE_ISSUER"] = True app_init(app) application = app -application.debug = (environ.get('GEN3_DEBUG') == "True") +application.debug = environ.get("GEN3_DEBUG") == "True" \ No newline at end of file diff --git a/helm/peregrine/templates/deployment.yaml b/helm/peregrine/templates/deployment.yaml index 561df45c5..0daf039b8 100644 --- a/helm/peregrine/templates/deployment.yaml +++ b/helm/peregrine/templates/deployment.yaml @@ -150,8 +150,12 @@ spec: volumeMounts: - name: "config-volume" readOnly: true - mountPath: "/var/www/peregrine/wsgi.py" + mountPath: "/var/www/peregrine/settings.py" subPath: "settings.py" + - name: "peregrine-volume" + readOnly: true + mountPath: "/var/www/peregrine/creds.json" + subPath: "creds.json" ports: - name: http containerPort: 80 diff --git a/helm/peregrine/templates/peregrine-creds.yaml b/helm/peregrine/templates/peregrine-creds.yaml new file mode 100644 index 000000000..14d898ac0 --- /dev/null +++ b/helm/peregrine/templates/peregrine-creds.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Secret +metadata: + name: peregrine-creds +type: Opaque +stringData: + creds.json: |- + { + "db_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" "sheepdog" "context" $) }}", + "db_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" "sheepdog" "context" $) }}", + "db_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" "sheepdog" "context" $) }}", + "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" "sheepdog" "context" $)}}", + "hostname": "{{ .Values.global.hostname }}", + "indexd_password": "", + "fence_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" "fence" "context" $) }}", + "fence_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" "fence" "context" $) }}", + "fence_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" "fence" "context" $) }}", + "fence_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" "fence" "context" $)}}" + } diff --git a/helm/peregrine/values.yaml b/helm/peregrine/values.yaml index 3335f8e3a..39e5d4e34 100644 --- a/helm/peregrine/values.yaml +++ b/helm/peregrine/values.yaml @@ -182,6 +182,9 @@ volumes: - name: config-volume secret: secretName: "peregrine-secret" +- name: peregrine-volume + secret: + secretName: "peregrine-creds" # -- (list) Volumes to mount to the container. 
volumeMounts: diff --git a/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf b/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf index b170da423..1244faf21 100644 --- a/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf +++ b/helm/revproxy/gen3.nginx.conf/pcdcanalysistools-service.conf @@ -20,7 +20,7 @@ location /analysis/ { send_timeout 300; set $proxy_service "pcdcanalysistools"; - set $upstream http://pcdcanalysis-service.default.svc.cluster.local; + set $upstream http://pcdcanalysistools-service.default.svc.cluster.local; rewrite ^/analysis/(.*) /$1 break; proxy_pass $upstream; } \ No newline at end of file diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index d2be4bd36..787ed30b8 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -159,6 +159,21 @@ http { } } + server { + listen 9200; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + set $proxy_service "gen3-elasticsearch-master"; + set $upstream "http://gen3-elasticsearch-master.$namespace.svc.cluster.local"; # Updated this line + proxy_pass $upstream; + } + } + server { listen 80; @@ -334,4 +349,4 @@ http { return 302 https://$host/no-workspace-access; } } -} +} \ No newline at end of file diff --git a/helm/sheepdog/sheepdog-secret/settings.py b/helm/sheepdog/sheepdog-secret/settings.py new file mode 100644 index 000000000..4d1465429 --- /dev/null +++ b/helm/sheepdog/sheepdog-secret/settings.py @@ -0,0 +1,56 @@ +from sheepdog.api import app, app_init +from os import environ +import confighelper + +APP_NAME = "sheepdog" + + +def load_json(file_name): + return confighelper.load_json(file_name, APP_NAME) + + +conf_data = load_json("creds.json") +config = app.config + +config["AUTH"] = "https://auth.service.consul:5000/v3/" +config["AUTH_ADMIN_CREDS"] = None +config["INTERNAL_AUTH"] = None + +# ARBORIST deprecated, replaced by ARBORIST_URL +# ARBORIST_URL is initialized in app_init() directly +config["ARBORIST"] = "http://arborist-service/" + +# Signpost: deprecated, replaced by index client. 
+config["SIGNPOST"] = { + "host": environ.get("SIGNPOST_HOST") or "http://indexd-service", + "version": "v0", + "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), +} +config["INDEX_CLIENT"] = { + "host": environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", + "version": "v0", + "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), +} +config["FAKE_AUTH"] = False +config["PSQLGRAPH"] = { + "host": environ.get("PGHOST"), + "user": environ.get("PGUSER"), + "password": environ.get("PGPASSWORD"), + "database": environ.get("PGDB"), +} + +config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") +config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (environ.get("FENCE_DB_USER"), environ.get("FENCE_DB_PASS"), environ.get("FENCE_DB_HOST"), environ.get("FENCE_DB_DBNAME")) + +config["BASE_URL"] = "https://%s/user" % conf_data["hostname"] # for use by authutils remove when authutils gets updated +config["USER_API"] = "http://fence-service/" # for use by authutils og: "https://%s/user" % conf_data["hostname"] +# use the USER_API URL instead of the public issuer URL to accquire JWT keys +config["FORCE_ISSUER"] = True +config["DICTIONARY_URL"] = environ.get( + "DICTIONARY_URL", + "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json", +) + +app_init(app) +application = app +application.debug = environ.get("GEN3_DEBUG") == "True" \ No newline at end of file diff --git a/helm/sheepdog/sheepdog-secret/wsgi.py b/helm/sheepdog/sheepdog-secret/wsgi.py deleted file mode 100644 index 2818d1699..000000000 --- a/helm/sheepdog/sheepdog-secret/wsgi.py +++ /dev/null @@ -1,82 +0,0 @@ -##################################################### -# DO NOT CHANGE THIS FILE # -# config updates should be done in the service code # -##################################################### - -from sheepdog.api import app, app_init -from os import environ -# import config_helper - -APP_NAME='sheepdog' -# def load_json(file_name): -# return config_helper.load_json(file_name, APP_NAME) - -# conf_data = load_json('creds.json') -config = app.config - - -config['INDEX_CLIENT'] = { - 'host': environ.get('INDEX_CLIENT_HOST') or 'http://indexd-service', - 'version': 'v0', - 'auth': (environ.get( "INDEXD_USER", 'sheepdog'), environ.get( "INDEXD_PASS") ), -} - -config["PSQLGRAPH"] = { - 'host': environ.get( "PGHOST"), - 'user': environ.get( "PGUSER"), - 'password': environ.get( "PGPASSWORD"), - 'database': environ.get( "PGDB"), -} - -config['HMAC_ENCRYPTION_KEY'] = environ.get( "HMAC_ENCRYPTION_KEY") -config['FLASK_SECRET_KEY'] = environ.get( "FLASK_SECRET_KEY") - -fence_username = environ.get( "FENCE_DB_USER") -fence_password = environ.get( "FENCE_DB_PASS") -fence_host = environ.get( "FENCE_DB_HOST") -fence_database = environ.get( "FENCE_DB_DBNAME") -config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (fence_username, fence_password, fence_host, fence_database) - -config['DICTIONARY_URL'] = environ.get('DICTIONARY_URL','https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json') - - -# config['SUBMISSION'] = { -# 'bucket': conf_data.get( 'bagit_bucket', '{{bagit_bucket}}' ) -# } - -# config['STORAGE'] = { -# "s3": -# { -# "access_key": conf_data.get( 's3_access', '{{s3_access}}' ), -# 'secret_key': conf_data.get( 's3_secret', '{{s3_secret}}' ) -# } -# } - -hostname = environ.get("CONF_HOSTNAME", "localhost") - -config['OIDC_ISSUER'] = 'https://%s/user' % hostname - 
-config['OAUTH2'] = { - 'client_id': "conf_data.get('oauth2_client_id', '{{oauth2_client_id}}')", - 'client_secret': "conf_data.get('oauth2_client_secret', '{{oauth2_client_secret}}')", - 'api_base_url': 'https://%s/user/' % hostname, - 'authorize_url': 'https://%s/user/oauth2/authorize' % hostname, - 'access_token_url': 'https://%s/user/oauth2/token' % hostname, - 'refresh_token_url': 'https://%s/user/oauth2/token' % hostname, - 'client_kwargs': { - 'redirect_uri': 'https://%s/api/v0/oauth2/authorize' % hostname, - 'scope': 'openid data user', - }, - # deprecated key values, should be removed after all commons use new oidc - 'internal_oauth_provider': 'http://fence-service/oauth2/', - 'oauth_provider': 'https://%s/user/oauth2/' % hostname, - 'redirect_uri': 'https://%s/api/v0/oauth2/authorize' % hostname -} - -config['USER_API'] = environ.get('FENCE_URL') or 'http://fence-service/' -# use the USER_API URL instead of the public issuer URL to accquire JWT keys -config['FORCE_ISSUER'] = True -print(config) -app_init(app) -application = app -application.debug = (environ.get('GEN3_DEBUG') == "True") diff --git a/helm/sheepdog/templates/deployment.yaml b/helm/sheepdog/templates/deployment.yaml index b4673242c..75e62e656 100644 --- a/helm/sheepdog/templates/deployment.yaml +++ b/helm/sheepdog/templates/deployment.yaml @@ -41,9 +41,12 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} volumes: + - name: sheepdog-volume + secret: + secretName: "sheepdog-creds" - name: config-volume secret: - secretName: "sheepdog-secret" + secretName: sheepdog-secret - name: config-helper configMap: name: config-helper @@ -196,12 +199,11 @@ spec: name: manifest-global key: public_datasets optional: true - - name: INDEXD_PASS + - name: AUTHZ_ENTITY_NAME valueFrom: - secretKeyRef: - name: indexd-service-creds - key: sheepdog - optional: false + configMapKeyRef: + name: manifest-global + key: authz_entity_name - name: GEN3_UWSGI_TIMEOUT value: "600" - name: DICTIONARY_URL diff --git a/helm/sheepdog/templates/sheepdog-creds.yaml b/helm/sheepdog/templates/sheepdog-creds.yaml new file mode 100644 index 000000000..736e6db56 --- /dev/null +++ b/helm/sheepdog/templates/sheepdog-creds.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Secret +metadata: + name: sheepdog-creds +type: Opaque +stringData: + creds.json: |- + { + "db_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" $.Chart.Name "context" $) }}", + "db_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" $.Chart.Name "context" $) }}", + "db_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) }}", + "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" $.Chart.Name "context" $)}}", + "hostname": "{{ .Values.global.hostname }}", + "indexd_password": "", + "fence_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" "fence" "context" $) }}", + "fence_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" "fence" "context" $) }}", + "fence_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" "fence" "context" $) }}", + "fence_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" "fence" "context" $)}}" + } diff --git a/helm/sheepdog/values.yaml b/helm/sheepdog/values.yaml index 941c33c61..b53dca0d5 100644 --- a/helm/sheepdog/values.yaml +++ b/helm/sheepdog/values.yaml @@ -171,8 +171,12 @@ authNamespace: default 
volumeMounts: - name: "config-volume" readOnly: true - mountPath: "/var/www/sheepdog/wsgi.py" - subPath: "wsgi.py" + mountPath: "/var/www/sheepdog/settings.py" + subPath: "settings.py" + - name: "sheepdog-volume" + readOnly: true + mountPath: "/var/www/sheepdog/creds.json" + subPath: "creds.json" # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/pcdc_data/generate_data.sh b/pcdc_data/generate_data.sh new file mode 100755 index 000000000..d48f85752 --- /dev/null +++ b/pcdc_data/generate_data.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Define the file path +generate_file="generate.sh" + +cd ./gen3_scripts/gen3_load + +# Check if the file exists +if [ ! -f "$generate_file" ]; then + echo "Error: $generate_file not found." + exit 1 +fi + +chmod +x ./generate.sh + +# Use sed to replace the line +sed -i '' 's/GEN3_SCRIPTS_REPO_BRANCH="origin\/pcdc_dev"/GEN3_SCRIPTS_REPO_BRANCH="origin\/pyyaml-patch"/' "$generate_file" + +echo "data-simulator branch changed to pyyaml-patch change when PR is completed" + +./generate.sh + +cd ../.. \ No newline at end of file diff --git a/pcdc_data/load_elasticsearch.sh b/pcdc_data/load_elasticsearch.sh new file mode 100755 index 000000000..16ff71b65 --- /dev/null +++ b/pcdc_data/load_elasticsearch.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +pcdc clear_elasticsearch + +cd ./gen3_scripts/es_etl_patch + +rm -rf env/ + +# Check if the 'env' directory exists +if [ ! -d "env" ]; then + # If 'env' directory doesn't exist, create a virtual environment + echo "Creating virtual environment..." + python -m venv env +else + echo "Virtual environment 'env' already exists." +fi + +source env/bin/activate + +pip install pyyaml==5.3.1 +pip install python-dotenv +pip install gen3==4.5.0 + +pip install -r requirements.txt + +curr_dir=$(pwd) +auth_file_path="$curr_dir/env/lib/python3.9/site-packages/gen3/auth.py" +submission_file_path="$curr_dir/env/lib/python3.9/site-packages/gen3/submission.py" + +if [ ! -f "$auth_file_path" ]; then + echo "Error: File not found: $auth_file_path" + exit 1 +fi + +if [ ! -f "$submission_file_path" ]; then + echo "Error: File not found: $submission_file_path" + exit 1 +fi +# Find the line number where the text to be replaced is located +line_number=$(grep -n "resp = requests.post(auth_url, json=api_key)" "$auth_file_path" | cut -d ":" -f 1) + +# Edit the specified line +sed -i "" "${line_number}s/resp = requests.post(auth_url, json=api_key)/resp = requests.post(auth_url, json=api_key, verify=False)/" "$auth_file_path" + +echo "AUTH file edited successfully." + +sed -i "" -E 's/(requests\..*)\)/\1, verify=False)/' "$submission_file_path" + +echo "submission file edited successfully." + +cd etl + +python etl.py et + +#update the env variable to the mapping +latest_file=$(ls -1 ../files/nested_mapping_*.json | sort -r | head -n 1) +cp $latest_file ../files/nested_mapping.json + +python etl.py l + +cd ../../../.. + +INDEX=$(grep 'INDEX_NAME' .env | cut -d '=' -f 2-) +INDEX=$(echo "$INDEX" | sed "s/'//g") + + +cat << EOF > temp.yaml +guppy: + indices: + - index: "$INDEX" + type: "subject" + configIndex: "$INDEX-array-config" + authFilterField: "auth_resource_path" +EOF + +yq eval '. 
* load("./temp.yaml")' secret-values.yaml > updated-secret-values.yaml && mv updated-secret-values.yaml secret-values.yaml + + +pcdc roll revproxy guppy + + +rm ./temp.yaml \ No newline at end of file diff --git a/pcdc_data/load_gen3_scripts.sh b/pcdc_data/load_gen3_scripts.sh new file mode 100755 index 000000000..9485accc4 --- /dev/null +++ b/pcdc_data/load_gen3_scripts.sh @@ -0,0 +1,38 @@ +GEN3_SCRIPTS_REPO="https://github.com/chicagopcdc/gen3_scripts.git" +GEN3_SCRIPTS_REPO_BRANCH="origin/gen3-helm" + + +#------------------------------------------------------ +# Clean up +#------------------------------------------------------ +rm -rf ./gen3_scripts +echo "removed old folder" + +#------------------------------------------------------ +# Clone or Update chicagopcdc/data-simulator repo +#------------------------------------------------------ +echo "Clone or Update chicagopcdc/gen3-scripts repo from github" + +# Does the repo exist? If not, go get it! +if [ ! -d "./gen3_scripts" ]; then + git clone $GEN3_SCRIPTS_REPO + + cd ./gen3_scripts + + git checkout -t $GEN3_SCRIPTS_REPO_BRANCH + git pull + + cd .. +fi + +#here I will set the defaults for the .env file if left blank + + + +#load in files to gen3_load +cp ../.env ./gen3_scripts/gen3_load +cp ../credentials.json ./gen3_scripts/gen3_load + +#load in files to es_etl_patch +cp ../.env ./gen3_scripts/es_etl_patch +cp ../credentials.json ./gen3_scripts/es_etl_patch \ No newline at end of file diff --git a/pcdc_data/load_graph_db.sh b/pcdc_data/load_graph_db.sh new file mode 100755 index 000000000..8b3f7663f --- /dev/null +++ b/pcdc_data/load_graph_db.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +cd ./gen3_scripts/gen3_load + +rm -rf env/ + +# Check if the 'env' directory exists +if [ ! -d "env" ]; then + # If 'env' directory doesn't exist, create a virtual environment + echo "Creating virtual environment..." + python -m venv env +else + echo "Virtual environment 'env' already exists." +fi + +source env/bin/activate + +pip install -r requirements.txt + +curr_dir=$(pwd) +auth_file_path="$curr_dir/env/lib/python3.9/site-packages/gen3/auth.py" +submission_file_path="$curr_dir/env/lib/python3.9/site-packages/gen3/submission.py" + +if [ ! -f "$auth_file_path" ]; then + echo "Error: File not found: $auth_file_path" + exit 1 +fi + +if [ ! -f "$submission_file_path" ]; then + echo "Error: File not found: $submission_file_path" + exit 1 +fi +# Find the line number where the text to be replaced is located +line_number=$(grep -n "resp = requests.post(auth_url, json=api_key)" "$auth_file_path" | cut -d ":" -f 1) + +# Edit the specified line +sed -i "" "${line_number}s/resp = requests.post(auth_url, json=api_key)/resp = requests.post(auth_url, json=api_key, verify=False)/" "$auth_file_path" + +echo "AUTH file edited successfully." + + +sed -i "" -E '/requests\./ s/^(.*\()(.*)(\)$)/\1\2, verify=False\3/' "$submission_file_path" + +echo "submission file edited successfully." + +cd ./operations + +python etl.py load \ No newline at end of file diff --git a/pcdc_data/run_all.sh b/pcdc_data/run_all.sh new file mode 100755 index 000000000..db3dbd959 --- /dev/null +++ b/pcdc_data/run_all.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +ENV_FILE="../.env" + +# Check if .env file exists +if [ ! 
-f "$ENV_FILE" ]; then + touch "$ENV_FILE" + + # Get today's date in the format YYYYMMDD + DATE=$(date +"%Y%m%d") + + # Populate .env file with variables + echo "DICTIONARY_URL='https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json'" > "$ENV_FILE" + echo "PROGRAM_NAME='pcdc'" >> "$ENV_FILE" + echo "PROJECT_CODE='$DATE'" >> "$ENV_FILE" + echo "SAMPLE=400" >> "$ENV_FILE" + echo "BASE_URL=''" >> "$ENV_FILE" + echo "LOCAL_FILE_PATH='../fake_data/data-simulator'" >> "$ENV_FILE" + echo "FILE_TYPE='json'" >> "$ENV_FILE" + echo "TYPES=[\"program\", \"adverse_event\", \"biopsy_surgical_procedure\", \"biospecimen\", \"cellular_immunotherapy\", \"concomitant_medication\", \"core_metadata_collection\", \"cytology\", \"disease_characteristic\", \"external_reference\", \"family_medical_history\", \"function_test\", \"growing_teratoma_syndrome\", \"histology\", \"imaging\", \"immunohistochemistry\", \"lab\", \"late_effect\", \"lesion_characteristic\", \"medical_history\", \"minimal_residual_disease\", \"molecular_analysis\", \"myeloid_sarcoma_involvement\", \"non_protocol_therapy\", \"off_protocol_therapy_study\", \"patient_reported_outcomes_metadata\", \"person\", \"project\", \"protocol_treatment_modification\", \"radiation_therapy\", \"secondary_malignant_neoplasm\", \"staging\", \"stem_cell_transplant\", \"study\", \"subject\", \"subject_response\", \"survival_characteristic\", \"timing\", \"total_dose\", \"transfusion_medicine_procedure\", \"tumor_assessment\", \"vital\"]" >> "$ENV_FILE" + echo "REQUESTS_CA_BUNDLE='/Users/pmurdoch/Documents/PCDC/gen3-helm/CA/ca.pem'" >> "$ENV_FILE" + echo "PROJECT_LIST=[\"pcdc-$DATE\"]" >> "$ENV_FILE" + echo "CREDENTIALS='../credentials.json'" >> "$ENV_FILE" + echo "LOCAL_ES_FILE_PATH='../files/pcdc_data.json'" >> "$ENV_FILE" + echo "ES_PORT=9200" >> "$ENV_FILE" + echo "INDEX_NAME='pcdc-$DATE'" >> "$ENV_FILE" +fi + +chmod +x "$(dirname "$0")"/*.sh + +./load_gen3_scripts.sh + +./generate_data.sh + +./load_graph_db.sh + +./load_elasticsearch.sh \ No newline at end of file diff --git a/tools/clear_elasticsearch.sh b/tools/clear_elasticsearch.sh new file mode 100755 index 000000000..2cfd2e1c8 --- /dev/null +++ b/tools/clear_elasticsearch.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +ESHOST="http://localhost:9200" + + +function es_indices() { + curl -X GET "${ESHOST}/_cat/indices?v" +} + +indexList=$(es_indices 2> /dev/null | awk '{ print $3 }' | grep -v "^index$") + + +for name in $indexList; do + echo curl -iv -X DELETE "${ESHOST}/$name" + curl -iv -X DELETE "${ESHOST}/$name" +done diff --git a/tools/connect_to_db.sh b/tools/connect_to_db.sh new file mode 100755 index 000000000..e277b928e --- /dev/null +++ b/tools/connect_to_db.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Check if service name is provided as an argument +if [ $# -eq 0 ]; then + echo "Usage: $0 " + exit 1 +fi + +service_name=$1 + +# Retrieve password from secret +password=$(kubectl get secret ${service_name}-dbcreds -o jsonpath="{.data.password}" | base64 --decode) + +# Execute command in the pod +kubectl exec -it pcdc-postgresql-0 -- /bin/bash -c "PGPASSWORD='${password}' psql -h pcdc-postgresql -U ${service_name}_pcdc -d ${service_name}_pcdc" \ No newline at end of file diff --git a/tools/connect_to_pod.sh b/tools/connect_to_pod.sh new file mode 100755 index 000000000..d624193cb --- /dev/null +++ b/tools/connect_to_pod.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Check if service name is provided as an argument +if [ $# -eq 0 ]; then + echo "Usage: $0 " + exit 1 +fi + +service_name=$1 + +# Get pod name 
associated with the service
+pod_name=$(kubectl get pods -l app="$service_name" -o jsonpath='{.items[*].metadata.name}')
+
+# Check if pod name is empty
+if [ -z "$pod_name" ]; then
+    echo "Error: No pod found for service $service_name"
+    exit 1
+fi
+
+# Execute command in the pod
+kubectl exec -it "$pod_name" -- /bin/bash
\ No newline at end of file
diff --git a/tools/job.sh b/tools/job.sh
new file mode 100755
index 000000000..36feaba3c
--- /dev/null
+++ b/tools/job.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Check if argument is provided
+if [ $# -eq 0 ]; then
+    echo "Usage: $0 <job_name>"
+    exit 1
+fi
+
+# Extract the job name
+job_name="$1"
+
+# Delete the specified Job
+kubectl delete job "$job_name"
+
+# Check if the deletion was successful
+if [ $? -ne 0 ]; then
+    echo "Error: Failed to delete Job $job_name"
+    exit 1
+fi
+
+# Run roll.sh script
+roll.sh
\ No newline at end of file
diff --git a/tools/load_data.sh b/tools/load_data.sh
new file mode 100755
index 000000000..99f6f6b09
--- /dev/null
+++ b/tools/load_data.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+cd "$(dirname "$0")/../pcdc_data" || exit 1
+
+chmod +x ./run_all.sh
+
+
+./run_all.sh
\ No newline at end of file
diff --git a/tools/logs.sh b/tools/logs.sh
new file mode 100755
index 000000000..374f055f1
--- /dev/null
+++ b/tools/logs.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Check if service name is provided as an argument
+if [ $# -eq 0 ]; then
+    echo "Usage: $0 [-f] <service_name>"
+    exit 1
+fi
+
+# Check if the first argument is "-f"
+if [ "$1" = "-f" ]; then
+    follow_logs=true
+    shift # Remove the "-f" argument from the argument list
+else
+    follow_logs=false
+fi
+
+service_name=$1
+
+# Get pod name associated with the service
+pod_name=$(kubectl get pods -l app="$service_name" -o jsonpath='{.items[*].metadata.name}')
+
+# Check if pod name is empty
+if [ -z "$pod_name" ]; then
+    echo "Error: No pod found for service $service_name"
+    exit 1
+fi
+
+# Execute kubectl logs with or without "-f" option based on the flag
+if [ "$follow_logs" = true ]; then
+    kubectl logs -f "$pod_name"
+else
+    kubectl logs "$pod_name"
+fi
\ No newline at end of file
diff --git a/tools/pcdc b/tools/pcdc
new file mode 100755
index 000000000..6f1c1a615
--- /dev/null
+++ b/tools/pcdc
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+
+# Define script names
+CONNECT_SCRIPT="connect_to_db.sh"
+ROLL_SCRIPT="roll.sh"
+JOB_SCRIPT="job.sh"
+POD_SCRIPT="connect_to_pod.sh"
+CLEAR_ELASTICSEARCH="clear_elasticsearch.sh"
+RESTART_POD="restart_pod.sh"
+LOGS="logs.sh"
+LOAD_DATA="load_data.sh"
+
+
+# Check if command is provided as an argument
+if [ $# -eq 0 ]; then
+    echo "Usage: $0 <command> [<args>...]"
+    exit 1
+fi
+
+# Give execute permission to the .sh scripts in this directory
+chmod +x "$(dirname "$0")"/*.sh
+
+# Extract the command
+command="$1"
+shift # Remove the first argument (command)
+
+# Check if the command is valid
+case "$command" in
+    "psql")
+        # Run the connect_to_db.sh script with the remaining arguments
+        "$CONNECT_SCRIPT" "$@"
+        ;;
+    "roll")
+        # Run the roll.sh script with the remaining arguments
+        "$ROLL_SCRIPT" "$@"
+        ;;
+    "job")
+        # Run the job.sh script with the remaining arguments
+        "$JOB_SCRIPT" "$@"
+        ;;
+    "pod")
+        # Run the connect_to_pod.sh script with the remaining arguments
+        "$POD_SCRIPT" "$@"
+        ;;
+    "clear_elasticsearch")
+        # Run the clear_elasticsearch.sh script with the remaining arguments
+        "$CLEAR_ELASTICSEARCH" "$@"
+        ;;
+    "restart")
+        # Run the restart_pod.sh script with the remaining arguments
+        "$RESTART_POD" "$@"
+        ;;
+    "logs")
+        # Run the logs.sh script with 
the remaining arguments
+        "$LOGS" "$@"
+        ;;
+    "load_data")
+        # Run the load_data.sh script with the remaining arguments
+        "$LOAD_DATA" "$@"
+        ;;
+    *)
+        echo "Invalid command: $command"
+        exit 1
+        ;;
+esac
\ No newline at end of file
diff --git a/tools/restart_pod.sh b/tools/restart_pod.sh
new file mode 100755
index 000000000..90fa1a8b9
--- /dev/null
+++ b/tools/restart_pod.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Check if service name is provided as an argument
+if [ $# -eq 0 ]; then
+    echo "Usage: $0 <service_name>"
+    exit 1
+fi
+
+service_name=$1
+
+# Get pod name associated with the service
+pod_name=$(kubectl get pods -l app="$service_name" -o jsonpath='{.items[*].metadata.name}')
+
+# Check if pod name is empty
+if [ -z "$pod_name" ]; then
+    echo "Error: No pod found for service $service_name"
+    exit 1
+fi
+
+# Execute delete pod
+kubectl delete pod "$pod_name"
\ No newline at end of file
diff --git a/tools/roll.sh b/tools/roll.sh
new file mode 100755
index 000000000..1e0f60b7f
--- /dev/null
+++ b/tools/roll.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Change directory to the helm chart directory
+cd "$(dirname "$0")/../helm/gen3" || exit 1
+
+rm ../../values.yaml
+
+# Check if ../../secret-values.yaml exists
+if [ -f ../../secret-values.yaml ]; then
+    yq '. *= load("../../secret-values.yaml")' ../../default-values.yaml > ../../values.yaml
+else
+    cp ../../default-values.yaml ../../values.yaml
+fi
+# Directory to store CA certificate
+ca_dir=../../CA
+ca_file=$ca_dir/ca.pem
+
+# Create the CA certificate directory if it doesn't exist
+if [ ! -d "$ca_dir" ]; then
+    mkdir -p "$ca_dir"
+fi
+
+# Check if CA certificate file exists, if not, create it
+if [ ! -f "$ca_file" ]; then
+    # Create the CA certificate file
+    echo "Creating CA certificate file..."
+    touch "$ca_file"
+fi
+
+# Extract the value from ../../values.yaml
+ca_cert=$(yq eval '.global.tls.cert' ../../values.yaml)
+
+# Write the extracted value to the CA certificate file
+echo "$ca_cert" > "$ca_file"
+
+# Check if arguments are passed
+if [ $# -gt 0 ]; then
+    # Iterate over each argument (service name)
+    for service_name in "$@"
+    do
+        # Delete the deployment corresponding to the service name
+        kubectl delete deployment ${service_name}-deployment
+    done
+fi
+
+# Run helm dependency update
+helm dependency update
+
+# Run helm upgrade --install command
+helm upgrade --install pcdc . 
-f ../../values.yaml \ No newline at end of file From a8997744dc69041c27d1305f016f923bd5cc48b9 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 14 Jun 2024 16:14:17 -0700 Subject: [PATCH 011/126] Update gitingore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 42aec08b6..d7aaf7146 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ credentials.json CA/ temp.yaml values.yaml +gen3_scripts/ \ No newline at end of file From da96649605cb8cda051c752e9b7eece1e76e1bc2 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 17 Jun 2024 17:53:26 -0700 Subject: [PATCH 012/126] fix peregrine --- helm/peregrine/templates/deployment.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/helm/peregrine/templates/deployment.yaml b/helm/peregrine/templates/deployment.yaml index 26b3d7c5c..0daf039b8 100644 --- a/helm/peregrine/templates/deployment.yaml +++ b/helm/peregrine/templates/deployment.yaml @@ -147,7 +147,6 @@ spec: value: "False" - name: CONF_HOSTNAME value: {{ .Values.global.hostname }} - {{- with .Values.volumeMounts }} volumeMounts: - name: "config-volume" readOnly: true From f4d6baa945b58c3160e011860268aa04449a47f2 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 11 Jul 2024 10:43:54 -0700 Subject: [PATCH 013/126] add gearbox to helm --- .gitignore | 2 +- ...values.yaml => gearbox-default-values.yaml | 145 +- helm/amanuensis/templates/db-init.yaml | 4 +- helm/gearbox-middleware/.helmignore | 23 + helm/gearbox-middleware/Chart.yaml | 32 + helm/gearbox-middleware/templates/NOTES.txt | 1 + .../gearbox-middleware/templates/_helpers.tpl | 79 + .../templates/deployment.yaml | 82 + .../templates/gearbox-create-public-key.yaml | 68 + .../templates/gearbox-middleware-secret.yaml | 21 + helm/gearbox-middleware/templates/hpa.yaml | 32 + .../gearbox-middleware/templates/service.yaml | 15 + .../templates/serviceaccount.yaml | 12 + .../templates/tests/test-connection.yaml | 15 + helm/gearbox-middleware/values.yaml | 161 ++ helm/gearbox/.helmignore | 23 + helm/gearbox/Chart.yaml | 33 + helm/gearbox/templates/NOTES.txt | 1 + helm/gearbox/templates/_helpers.tpl | 68 + .../templates/create-gearbox-config.yaml | 69 + helm/gearbox/templates/db-init.yaml | 5 + helm/gearbox/templates/deployment.yaml | 95 + helm/gearbox/templates/gearbox-secret.yaml | 19 + helm/gearbox/templates/hpa.yaml | 32 + helm/gearbox/templates/service.yaml | 15 + helm/gearbox/templates/serviceaccount.yaml | 13 + .../templates/tests/test-connection.yaml | 15 + helm/gearbox/values.yaml | 248 ++ helm/gen3/Chart.yaml | 11 +- helm/gen3/templates/global-manifest.yaml | 2 +- helm/portal/defaults/gitops.json | 1054 +++------ helm/portal/templates/deployment.yaml | 4 + helm/portal/values.yaml | 1 - .../gearbox-middleware-service.conf | 41 + .../gen3.nginx.conf/gearbox-service.conf | 41 + helm/revproxy/nginx/nginx.conf | 12 +- helm/revproxy/templates/deployment.yaml | 12 + pcdc-default-values.yaml | 2057 +++++++++++++++++ pcdc_data/load_elasticsearch.sh | 2 +- tools/gearbox | 56 + tools/pcdc | 2 +- tools/roll.sh | 12 +- 42 files changed, 3762 insertions(+), 873 deletions(-) rename default-values.yaml => gearbox-default-values.yaml (94%) create mode 100644 helm/gearbox-middleware/.helmignore create mode 100644 helm/gearbox-middleware/Chart.yaml create mode 100644 helm/gearbox-middleware/templates/NOTES.txt create mode 100644 helm/gearbox-middleware/templates/_helpers.tpl create mode 100644 helm/gearbox-middleware/templates/deployment.yaml create mode 100644 
helm/gearbox-middleware/templates/gearbox-create-public-key.yaml create mode 100644 helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml create mode 100644 helm/gearbox-middleware/templates/hpa.yaml create mode 100644 helm/gearbox-middleware/templates/service.yaml create mode 100644 helm/gearbox-middleware/templates/serviceaccount.yaml create mode 100644 helm/gearbox-middleware/templates/tests/test-connection.yaml create mode 100644 helm/gearbox-middleware/values.yaml create mode 100644 helm/gearbox/.helmignore create mode 100644 helm/gearbox/Chart.yaml create mode 100644 helm/gearbox/templates/NOTES.txt create mode 100644 helm/gearbox/templates/_helpers.tpl create mode 100644 helm/gearbox/templates/create-gearbox-config.yaml create mode 100644 helm/gearbox/templates/db-init.yaml create mode 100644 helm/gearbox/templates/deployment.yaml create mode 100644 helm/gearbox/templates/gearbox-secret.yaml create mode 100644 helm/gearbox/templates/hpa.yaml create mode 100644 helm/gearbox/templates/service.yaml create mode 100644 helm/gearbox/templates/serviceaccount.yaml create mode 100644 helm/gearbox/templates/tests/test-connection.yaml create mode 100644 helm/gearbox/values.yaml create mode 100644 helm/revproxy/gen3.nginx.conf/gearbox-middleware-service.conf create mode 100644 helm/revproxy/gen3.nginx.conf/gearbox-service.conf create mode 100644 pcdc-default-values.yaml create mode 100755 tools/gearbox diff --git a/.gitignore b/.gitignore index d7aaf7146..cba7d7ee5 100644 --- a/.gitignore +++ b/.gitignore @@ -9,5 +9,5 @@ secret-values.yaml credentials.json CA/ temp.yaml -values.yaml +/values.yaml gen3_scripts/ \ No newline at end of file diff --git a/default-values.yaml b/gearbox-default-values.yaml similarity index 94% rename from default-values.yaml rename to gearbox-default-values.yaml index 5f4e25502..3b603de9a 100644 --- a/default-values.yaml +++ b/gearbox-default-values.yaml @@ -1,8 +1,8 @@ global: dev: true hostname: localhost - portalApp: pcdc - dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + portal_app: gitops + dictionaryUrl: https://pcdc-gen3-dictionaries.s3.amazonaws.com/pcdc-schema-demo-amia.json authz_entity_name: "subject" tls: cert: | @@ -56,13 +56,8 @@ global: arborist: image: - repository: quay.io/pcdc/arborist - tag: 2023.08 - -amanuensis: - image: - repository: quay.io/pcdc/amanuensis - tag: "2.16.1" + repository: quay.io/cdis/arborist + tag: 2024.03 fence: FENCE_CONFIG: @@ -98,6 +93,10 @@ fence: jnkns: /programs/jnkns policies: + # GEARBOX + - id: gearbox_admin + resource_paths: ['/gearbox_gateway'] + role_ids: ['gearbox_user'] # General Access - id: 'workspace' description: 'be able to use workspace' @@ -547,6 +546,10 @@ fence: - /programs - /programs/pcdc resources: + #GEARBOX + - name: 'portal' + description: 'data portal service' + - name: 'gearbox_gateway' # General Access - name: 'data_file' description: 'data files, stored in S3' @@ -706,6 +709,13 @@ fence: # create, read, update, delete, read-storage, write-storage, # file_upload, access roles: + #GEARBOX + - id: 'gearbox_user' + permissions: + - id: 'gearbox_access' + action: + service: '*' + method: '*' # General Access - id: 'file_uploader' description: 'can upload data files' @@ -1005,6 +1015,7 @@ fence: pmurdoch@uchicago.edu: admin: true policies: + - gearbox_admin - data_upload - workspace - dashboard @@ -1022,7 +1033,6 @@ fence: - data_admin - analysis - privacy_policy - - login_no_access projects: - auth_id: QA privilege: [create, read, update, delete, 
upload, read-storage] @@ -1070,96 +1080,69 @@ fence: privilege: [create, read, update, delete, upload, read-storage] - auth_id: jnkns privilege: [create, read, update, delete, upload, read-storage] - furner.brian@gmail.com: - admin: true - policies: - - data_upload - - workspace - - dashboard - - mds_admin - - prometheus - - sower - - services.sheepdog-admin - - services.amanuensis-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin - - data_admin - - analysis - - privacy_policy - - login_no_access - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] - -guppy: - enabled: true - image: - repository: quay.io/pcdc/guppy - tag: 1.8.0 - authFilterField: "auth_resource_path" - -manifestservice: - image: - repository: quay.io/cdis/manifestservice - tag: 2023.08 - -pcdcanalysistools: - image: - repository: quay.io/pcdc/pcdcanalysistools - tag: 1.8.4 - -peregrine: - image: - repository: quay.io/pcdc/peregrine - tag: "1.3.5" - portal: image: - repository: quay.io/pcdc/windmill - tag: 1.25.0 + repository: quay.io/pcdc/gearbox_fe + tag: "dev" resources: requests: cpu: 1.0 gitops: - json: "" + json: | + { + "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" + } + + gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" revproxy: image: repository: quay.io/cdis/nginx tag: 2023.09 -sheepdog: - image: - repository: quay.io/pcdc/sheepdog - tag: "1.5.6" - -sower: +gearbox: + enabled: true image: - repository: quay.io/cdis/sower - tag: 2024.04 + repository: quay.io/pcdc/gearbox_be + tag: 1.3.0 + pullPolicy: Always -wts: +gearbox-middleware: + enabled: true image: - repository: quay.io/cdis/workspace-token-service - tag: 2024.04 + repository: quay.io/pcdc/gearbox-middleware + tag: "helm-test" + pullPolicy: Always ######################################################################################## # DISABLED SERVICES # ######################################################################################## +elasticsearch: + enabled: false + +amanuensis: + enabled: false + +guppy: + enabled: false + +manifestservice: + enabled: false + +pcdcanalysistools: + enabled: false + +peregrine: + enabled: false + +sheepdog: + enabled: false + +sower: + enabled: false + +wts: + enabled: false ambassador: # -- (bool) Whether to deploy the ambassador subchart. diff --git a/helm/amanuensis/templates/db-init.yaml b/helm/amanuensis/templates/db-init.yaml index abbefb6eb..d99ca1b2e 100644 --- a/helm/amanuensis/templates/db-init.yaml +++ b/helm/amanuensis/templates/db-init.yaml @@ -1,6 +1,6 @@ -{{ include "common.db_setup_job" . }} ---- {{ include "common.db-secret" . }} --- +{{ include "common.db_setup_job" . }} +--- {{ include "common.db_setup_sa" . }} --- diff --git a/helm/gearbox-middleware/.helmignore b/helm/gearbox-middleware/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/helm/gearbox-middleware/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml new file mode 100644 index 000000000..3264d7d74 --- /dev/null +++ b/helm/gearbox-middleware/Chart.yaml @@ -0,0 +1,32 @@ +apiVersion: v2 +name: gearbox-middleware +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" + +dependencies: +- name: common + version: 0.1.7 + repository: file://../common +- name: gearbox + version: 0.1.0 + repository: file://../gearbox \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/NOTES.txt b/helm/gearbox-middleware/templates/NOTES.txt new file mode 100644 index 000000000..c1e7e1aef --- /dev/null +++ b/helm/gearbox-middleware/templates/NOTES.txt @@ -0,0 +1 @@ +{{ .Chart.Name }} has been deployed successfully. diff --git a/helm/gearbox-middleware/templates/_helpers.tpl b/helm/gearbox-middleware/templates/_helpers.tpl new file mode 100644 index 000000000..b506cb67c --- /dev/null +++ b/helm/gearbox-middleware/templates/_helpers.tpl @@ -0,0 +1,79 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "gearbox-middleware.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "gearbox-middleware.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
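+The result is truncated to 63 characters (the Kubernetes label value limit) and any "+"
+in the chart version is replaced with "_" so that the rendered value is a valid label.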
+*/}} +{{- define "gearbox-middleware.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "gearbox-middleware.labels" -}} +{{- if .Values.commonLabels }} + {{- with .Values.commonLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.commonLabels" .)}} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "gearbox-middleware.selectorLabels" -}} +{{- if .Values.selectorLabels }} + {{- with .Values.selectorLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.selectorLabels" .)}} +{{- end }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "gearbox-middleware.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "gearbox-middleware.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + + +{{- define "getOrCreateRSAPrivateKey" -}} +{{- $secretName := "gearbox-middleware-jwt-keys-g3auto" }} +{{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace $secretName) }} +{{- if $existingSecret }} +{{- index $existingSecret.data "jwt_private_key.pem" }} +{{- else }} +{{- genPrivateKey "rsa" | b64enc }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml new file mode 100644 index 000000000..9e7b27172 --- /dev/null +++ b/helm/gearbox-middleware/templates/deployment.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gearbox-middleware-deployment + labels: + {{- include "gearbox-middleware.labels" . | nindent 4 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "gearbox-middleware.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "gearbox-middleware.selectorLabels" . | nindent 8 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 8 }} + {{- end }} + spec: + volumes: + {{- toYaml .Values.volumes | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "gearbox-middleware.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /_status + port: 80 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /_status + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 12 }} + env: + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogEnvVar" . 
| nindent 12 }} + {{- end }} + {{- toYaml .Values.env | nindent 12 }} + initContainers: + - name: wait-for-gearbox + image: busybox:1.31.0 + command: ["/bin/sh", "-c", "until [ $(wget -q --spider http://gearbox-service/_status && echo '200' || echo '500') -eq 200 ]; do echo 'Waiting for gearbox to be ready...'; sleep 2; done"] {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/gearbox-create-public-key.yaml b/helm/gearbox-middleware/templates/gearbox-create-public-key.yaml new file mode 100644 index 000000000..ef285b4fe --- /dev/null +++ b/helm/gearbox-middleware/templates/gearbox-create-public-key.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-jwt-patch-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Chart.Name }}-jwt-patch-role +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Chart.Name }}-jwt-patch-rolebinding +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-jwt-patch-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ .Chart.Name }}-jwt-patch-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: gearbox-middleware-create-public-key + labels: + app: gen3job +spec: + template: + spec: + serviceAccountName: {{ .Chart.Name }}-jwt-patch-sa + containers: + - name: public-keygen + image: quay.io/cdis/awshelper:master + env: + - name: PRIVATE_KEY_PEM + valueFrom: + secretKeyRef: + name: gearbox-middleware-jwt-keys-g3auto + key: jwt_private_key.pem + optional: false + command: ["/bin/sh", "-c"] + args: + - | + set -e + + # Read the private key from the secret + private_key=$(kubectl get secret gearbox-middleware-jwt-keys-g3auto -o jsonpath='{.data.jwt_private_key\.pem}' | base64 --decode) + + # Create a temporary file for the private key + echo "${private_key}" > /tmp/private_key.pem + + # Generate the public key from the private key + openssl rsa -in /tmp/private_key.pem -pubout -out /tmp/public_key.pem + + # Base64 encode the public key + public_key=$(base64 -w 0 /tmp/public_key.pem) + + # Update the secret with the public key + kubectl patch secret gearbox-middleware-jwt-keys-g3auto -p="{\"data\": {\"jwt_public_key.pem\": \"${public_key}\"}}" + + restartPolicy: OnFailure \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml b/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml new file mode 100644 index 000000000..3442aea62 --- /dev/null +++ b/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Secret +metadata: + name: gearbox-middleware-jwt-keys-g3auto +type: Opaque +data: + jwt_private_key.pem: {{ include "getOrCreateRSAPrivateKey" . 
}} + +--- + +apiVersion: v1 +kind: Secret +metadata: + name: gearbox-middleware-g3auto +type: Opaque +stringData: + gearbox-middleware.env: | + DEBUG=True + FORCE_ISSUER=True + USER_API="http://fence-service/" + ALLOWED_ISSUERS="http://fence-service/,https://localhost/user" \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/hpa.yaml b/helm/gearbox-middleware/templates/hpa.yaml new file mode 100644 index 000000000..0830fef12 --- /dev/null +++ b/helm/gearbox-middleware/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "gearbox-middleware.fullname" . }} + labels: + {{- include "gearbox-middleware.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "gearbox-middleware.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/gearbox-middleware/templates/service.yaml b/helm/gearbox-middleware/templates/service.yaml new file mode 100644 index 000000000..0a11f55c5 --- /dev/null +++ b/helm/gearbox-middleware/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: gearbox-middleware-service + labels: + {{- include "gearbox-middleware.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "gearbox-middleware.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/serviceaccount.yaml b/helm/gearbox-middleware/templates/serviceaccount.yaml new file mode 100644 index 000000000..0d406cf0f --- /dev/null +++ b/helm/gearbox-middleware/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "gearbox-middleware.serviceAccountName" . }} + labels: + {{- include "gearbox-middleware.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/gearbox-middleware/templates/tests/test-connection.yaml b/helm/gearbox-middleware/templates/tests/test-connection.yaml new file mode 100644 index 000000000..4637273af --- /dev/null +++ b/helm/gearbox-middleware/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "gearbox-middleware.fullname" . }}-test-connection" + labels: + {{- include "gearbox-middleware.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "gearbox-middleware.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml new file mode 100644 index 000000000..fe428edb9 --- /dev/null +++ b/helm/gearbox-middleware/values.yaml @@ -0,0 +1,161 @@ +# Default values for gearbox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # -- (bool) Whether the deployment is for development purposes. + dev: true + # -- (map) Postgres database configuration. + postgres: + # -- (bool) Whether the database should be created. + dbCreate: true + # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres + master: + # -- (string) hostname of postgres server + host: + # -- (string) username of superuser in postgres. This is used to create or restore databases + username: postgres + # -- (string) password for superuser in postgres. This is used to create or restore databases + password: + # -- (string) Port for Postgres. + port: "5432" + # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. + environment: default + # -- (string) Hostname for the deployment. + hostname: localhost + # -- (string) ARN of the reverse proxy certificate. + revproxyArn: arn:aws:acm:us-east-1:123456:certificate + # -- (string) URL of the data dictionary. + dictionaryUrl: https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json + # -- (string) Portal application name. + portalApp: gitops + # -- (string) S3 bucket name for Kubernetes manifest files. + kubeBucket: kube-gen3 + # -- (string) S3 bucket name for log files. + logsBucket: logs-gen3 + # -- (bool) Whether to sync data from dbGaP. + syncFromDbgap: false + # -- (bool) Whether public datasets are enabled. + publicDataSets: true + # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` + tierAccessLevel: libre + # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. + tierAccessLimit: 1000 + # -- (bool) Whether network policies are enabled. + netPolicy: true + # -- (int) Number of dispatcher jobs. + dispatcherJobNum: 10 + # -- (bool) Whether Datadog is enabled. + ddEnabled: false + # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. + pdb: false + # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. + minAvialable: 1 + +replicaCount: 1 + +image: + repository: quay.io/pcdc/gearbox-middleware + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: test_criterion_patch_Tue__09_Apr_2024_22_32_33_GMT + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: + requests: + cpu: 0.8 + memory: 1024Mi + limits: + cpu: 2 + memory: 2048Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +env: + +volumes: + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys-g3auto" + items: + - key: jwt_private_key.pem + path: jwt_private_key.pem + - name: config-volume-g3auto + secret: + secretName: "gearbox-middleware-g3auto" + optional: true + # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this + # is only available if a /gearbox directory exists. + - name: config-volume + secret: + secretName: gearbox-middleware-config + optional: true + +volumeMounts: + - name: "gearbox-middleware-jwt-keys" + readOnly: true + mountPath: "/gearbox_middleware/gearbox_middleware/keys/jwt_private_key.pem" + subPath: jwt_private_key.pem + - name: config-volume-g3auto + readOnly: true + mountPath: "/gearbox_middleware/.env" + subPath: gearbox-middleware.env + - name: config-volume + readOnly: true + mountPath: /aggregate_config.json + subPath: aggregate_config.json diff --git a/helm/gearbox/.helmignore b/helm/gearbox/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/helm/gearbox/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml new file mode 100644 index 000000000..6038c80b3 --- /dev/null +++ b/helm/gearbox/Chart.yaml @@ -0,0 +1,33 @@ +apiVersion: v2 +name: gearbox +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" + +dependencies: +- name: common + version: 0.1.7 + repository: file://../common +- name: postgresql + version: 11.9.13 + repository: "https://charts.bitnami.com/bitnami" + condition: postgres.separate \ No newline at end of file diff --git a/helm/gearbox/templates/NOTES.txt b/helm/gearbox/templates/NOTES.txt new file mode 100644 index 000000000..304ff546f --- /dev/null +++ b/helm/gearbox/templates/NOTES.txt @@ -0,0 +1 @@ +{{ .Chart.Name }} has been deployed successfully. \ No newline at end of file diff --git a/helm/gearbox/templates/_helpers.tpl b/helm/gearbox/templates/_helpers.tpl new file mode 100644 index 000000000..00e8f4b2c --- /dev/null +++ b/helm/gearbox/templates/_helpers.tpl @@ -0,0 +1,68 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "gearbox.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "gearbox.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "gearbox.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "gearbox.labels" -}} +{{- if .Values.commonLabels }} + {{- with .Values.commonLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.commonLabels" .)}} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "gearbox.selectorLabels" -}} +{{- if .Values.selectorLabels }} + {{- with .Values.selectorLabels }} + {{- toYaml . }} + {{- end }} +{{- else }} + {{- (include "common.selectorLabels" .)}} +{{- end }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "gearbox.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "gearbox.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/gearbox/templates/create-gearbox-config.yaml b/helm/gearbox/templates/create-gearbox-config.yaml new file mode 100644 index 000000000..63d5892e4 --- /dev/null +++ b/helm/gearbox/templates/create-gearbox-config.yaml @@ -0,0 +1,69 @@ +# DB Setup ServiceAccount +# Needs to update/ create secrets to signal that db is ready for use. 
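+# The Job below loops until the gearbox-dbcreds secret exists, decodes its password,
+# appends a DB_PASSWORD=<password> line to the gearbox.env key of the gearbox-g3auto
+# secret, and then sets secretready ("dHJ1ZQo=" is base64 for "true"). The gearbox
+# deployment reads that flag through its GB_SECRET_READY environment variable, so its
+# pods do not start until the database credentials have been wired into gearbox.env.
+#
+# A quick way to confirm the patch has applied, for example while debugging a pod that
+# will not start:
+#   kubectl get secret gearbox-g3auto -o jsonpath='{.data.secretready}' | base64 --decode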
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-secret-patch-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Chart.Name }}-secret-patch-role +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Chart.Name }}-secret-patch-rolebinding +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-secret-patch-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ .Chart.Name }}-secret-patch-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: gearbox-g3auto-patch +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: {{ .Chart.Name }}-secret-patch-sa + containers: + - name: gearbox-g3auto-patch + image: quay.io/cdis/awshelper:master + command: ["/bin/sh", "-c"] + args: + - | + while true; do + if kubectl get secret gearbox-dbcreds; then + echo "gearbox-dbcreds secret found" + password=$(kubectl get secret gearbox-dbcreds -o jsonpath="{.data.password}" | base64 --decode) + echo "Retrieved password from gearbox-dbcreds" + gearbox_env=$(kubectl get secret gearbox-g3auto -o jsonpath="{.data.gearbox\.env}" | base64 --decode) + echo "Current gearbox.env content: $gearbox_env" + updated_gearbox_env=$(echo "${gearbox_env}\nDB_PASSWORD=${password}") + echo "Updated gearbox.env content: $updated_gearbox_env" + encoded_gearbox_env=$(echo -n "$updated_gearbox_env" | base64 -w 0) + kubectl patch secret gearbox-g3auto -p "{\"data\":{\"gearbox.env\":\"${encoded_gearbox_env}\"}}" + echo "Patched gearbox-g3auto with updated gearbox.env" + kubectl patch secret gearbox-g3auto -p '{"data":{"secretready":"dHJ1ZQo="}}' + echo "Patched gearbox-g3auto with secretready" + break + else + echo "Waiting for gearbox-dbcreds secret to be created" + sleep 5 + fi + done + restartPolicy: Never + + diff --git a/helm/gearbox/templates/db-init.yaml b/helm/gearbox/templates/db-init.yaml new file mode 100644 index 000000000..bdf813f7b --- /dev/null +++ b/helm/gearbox/templates/db-init.yaml @@ -0,0 +1,5 @@ +{{ include "common.db-secret" . }} +--- +{{ include "common.db_setup_job" . }} +--- +{{ include "common.db_setup_sa" . }} \ No newline at end of file diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml new file mode 100644 index 000000000..b342a9c66 --- /dev/null +++ b/helm/gearbox/templates/deployment.yaml @@ -0,0 +1,95 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gearbox-deployment + labels: + {{- include "gearbox.labels" . | nindent 4 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "gearbox.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "gearbox.selectorLabels" . | nindent 8 }} + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogLabels" . | nindent 8 }} + {{- end }} + spec: + volumes: + {{- toYaml .Values.volumes | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "gearbox.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /_status + port: 80 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /_status + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 12 }} + env: + {{- if .Values.global.ddEnabled }} + {{- include "common.datadogEnvVar" . | nindent 12 }} + {{- end }} + {{- toYaml .Values.env | nindent 12 }} + initContainers: + - name: gearbox-db-migrate + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + {{- toYaml .Values.initVolumeMounts | nindent 12 }} + env: + {{- toYaml .Values.env | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + command: ["/bin/sh"] + args: + - "-c" + - | + /env/bin/alembic upgrade head + + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/gearbox/templates/gearbox-secret.yaml b/helm/gearbox/templates/gearbox-secret.yaml new file mode 100644 index 000000000..4fc33dd81 --- /dev/null +++ b/helm/gearbox/templates/gearbox-secret.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Secret +metadata: + name: gearbox-g3auto +type: Opaque +stringData: + {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} + base64Authz.txt: {{ $randomPass | quote | b64enc }} + gearbox.env: | + DEBUG=0 + FORCE_ISSUER=True + USER_API="http://fence-service/" + ALLOWED_ISSUERS="http://fence-service/,https://localhost/user" + DUMMY_S3=True + DB_DATABASE={{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }} + DB_HOST={{ (printf "%s-%s" $.Release.Name "postgresql" ) }} + DB_USER={{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }} + ADMIN_LOGINS={{ $randomPass }} + ENABLE_PHI=True diff --git a/helm/gearbox/templates/hpa.yaml b/helm/gearbox/templates/hpa.yaml new file mode 100644 index 000000000..b97170cfa --- /dev/null +++ b/helm/gearbox/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "gearbox.fullname" . }} + labels: + {{- include "gearbox.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "gearbox.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/gearbox/templates/service.yaml b/helm/gearbox/templates/service.yaml new file mode 100644 index 000000000..92c7bd807 --- /dev/null +++ b/helm/gearbox/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: gearbox-service + labels: + {{- include "gearbox.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "gearbox.selectorLabels" . | nindent 4 }} diff --git a/helm/gearbox/templates/serviceaccount.yaml b/helm/gearbox/templates/serviceaccount.yaml new file mode 100644 index 000000000..7dac719ce --- /dev/null +++ b/helm/gearbox/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "gearbox.serviceAccountName" . }} + labels: + {{- include "gearbox.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} + diff --git a/helm/gearbox/templates/tests/test-connection.yaml b/helm/gearbox/templates/tests/test-connection.yaml new file mode 100644 index 000000000..d7364bd2c --- /dev/null +++ b/helm/gearbox/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "gearbox.fullname" . }}-test-connection" + labels: + {{- include "gearbox.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "gearbox.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml new file mode 100644 index 000000000..e7e5c1a03 --- /dev/null +++ b/helm/gearbox/values.yaml @@ -0,0 +1,248 @@ +# Default values for gearbox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # -- (bool) Whether the deployment is for development purposes. + dev: true + # -- (map) Postgres database configuration. + postgres: + # -- (bool) Whether the database should be created. + dbCreate: true + # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres + master: + # -- (string) hostname of postgres server + host: + # -- (string) username of superuser in postgres. This is used to create or restore databases + username: postgres + # -- (string) password for superuser in postgres. This is used to create or restore databases + password: + # -- (string) Port for Postgres. + port: "5432" + # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. 
+ environment: default + # -- (string) Hostname for the deployment. + hostname: localhost + # -- (string) ARN of the reverse proxy certificate. + revproxyArn: arn:aws:acm:us-east-1:123456:certificate + # -- (string) URL of the data dictionary. + dictionaryUrl: https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json + # -- (string) Portal application name. + portalApp: gitops + # -- (string) S3 bucket name for Kubernetes manifest files. + kubeBucket: kube-gen3 + # -- (string) S3 bucket name for log files. + logsBucket: logs-gen3 + # -- (bool) Whether to sync data from dbGaP. + syncFromDbgap: false + # -- (bool) Whether public datasets are enabled. + publicDataSets: true + # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` + tierAccessLevel: libre + # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. + tierAccessLimit: 1000 + # -- (bool) Whether network policies are enabled. + netPolicy: true + # -- (int) Number of dispatcher jobs. + dispatcherJobNum: 10 + # -- (bool) Whether Datadog is enabled. + ddEnabled: false + # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. + pdb: false + # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. + minAvialable: 1 + +postgres: + # (bool) Whether the database should be restored from s3. Default to global.postgres.dbRestore + dbRestore: false + # -- (bool) Whether the database should be created. Default to global.postgres.dbCreate + dbCreate: + # -- (string) Hostname for postgres server. This is a service override, defaults to global.postgres.host + host: + # -- (string) Database name for postgres. This is a service override, defaults to - + database: + # -- (string) Username for postgres. This is a service override, defaults to - + username: + # -- (string) Port for Postgres. + port: "5432" + # -- (string) Password for Postgres. Will be autogenerated if left empty. + password: + # -- (string) Will create a Database for the individual service to help with developing it. + separate: false + +postgresql: + primary: + persistence: + # -- (bool) Option to persist the dbs data. + enabled: false + +replicaCount: 1 + +image: + repository: quay.io/pcdc/gearbox_be + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: 1.3.0 + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +resources: + requests: + cpu: 0.4 + memory: 512Mi + limits: + cpu: 1 + memory: 2048Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +env: + - name: GEN3_DEBUG + value: "False" + - name: GEN3_ES_ENDPOINT + value: http://esproxy-service:9200 + - name: AWS_REGION + value: "us-east-1" + - name: PGHOST + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: host + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: username + optional: false + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: password + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: database + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: dbcreated + optional: false + - name: GB_SECRET_READY + valueFrom: + secretKeyRef: + name: gearbox-g3auto + key: secretready + optional: false + +volumes: + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys-g3auto" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + + - name: config-volume-g3auto + secret: + secretName: gearbox-g3auto + # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this + # is only available if a /gearbox directory exists. + - name: config-volume + secret: + secretName: gearbox-config + optional: true + # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this + # may not exist if the commons does not have any gearbox manifest configuration. + - name: config-manifest + configMap: + name: manifest-gearbox + optional: true + +volumeMounts: + - name: "gearbox-middleware-jwt-keys" + readOnly: true + mountPath: "/src/src/gearbox/keys/jwt_public_key.pem" + subPath: jwt_public_key.pem + - name: config-volume-g3auto + readOnly: true + mountPath: /src/.env + subPath: gearbox.env + - name: config-volume + readOnly: true + mountPath: /aggregate_config.json + subPath: aggregate_config.json + - name: config-manifest + readOnly: true + mountPath: /gearbox.json + subPath: json + +initVolumeMounts: + - name: config-volume-g3auto + readOnly: true + mountPath: /src/.env + subPath: gearbox.env + + +# Values to determine the labels that are used for the deployment, pod, etc. +# -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". +release: "production" +# -- (string) Valid options are "true" or "false". If invalid option is set- the value will default to "false". +criticalService: "true" +# -- (string) Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. +partOf: "Core-Service" +# -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl +selectorLabels: +# -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl +commonLabels: + +# Values to configure datadog if ddEnabled is set to "true". 
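+# These values only take effect when global.ddEnabled is true; the deployment template
+# then renders the common.datadogLabels and common.datadogEnvVar helpers onto the pods.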
+# -- (bool) If enabled, the Datadog Agent will automatically inject Datadog-specific metadata into your application logs. +datadogLogsInjection: true +# -- (bool) If enabled, the Datadog Agent will collect profiling data for your application using the Continuous Profiler. This data can be used to identify performance bottlenecks and optimize your application. +datadogProfilingEnabled: true +# -- (int) A value between 0 and 1, that represents the percentage of requests that will be traced. For example, a value of 0.5 means that 50% of requests will be traced. +datadogTraceSampleRate: 1 \ No newline at end of file diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index cbb283187..6d49fb7a7 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -103,11 +103,20 @@ dependencies: version: "0.1.0" repository: "file://../amanuensis" condition: amanuensis.enabled +- name: gearbox + version: "0.1.0" + repository: "file://../gearbox" + condition: gearbox.enabled +- name: gearbox-middleware + version: "0.1.0" + repository: "file://../gearbox-middleware" + condition: gearbox-middleware.enabled + - name: elasticsearch version: 7.10.2 repository: "https://helm.elastic.co" - condition: global.dev + condition: elasticsearch.enabled - name: postgresql version: 11.9.13 repository: "https://charts.bitnami.com/bitnami" diff --git a/helm/gen3/templates/global-manifest.yaml b/helm/gen3/templates/global-manifest.yaml index a08c2e730..2e70635a7 100644 --- a/helm/gen3/templates/global-manifest.yaml +++ b/helm/gen3/templates/global-manifest.yaml @@ -14,7 +14,7 @@ data: "netpolicy": {{ .Values.global.netPolicy | quote }} "dispatcher_job_num": {{ .Values.global.dispatcherJobNum | quote }} "dd_enabled": {{ .Values.global.ddEnabled | quote }} - "authz_entity_name": {{.Values.global.authz_entity_name}} + "authz_entity_name": {{.Values.global.authz_entity_name | quote }} "frontend_root": {{ .Values.global.frontendRoot | quote }} {{- with .Values.global.origins_allow_credentials }} "origins_allow_credentials": {{ . 
| toJson | quote }} diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index 9c6358f3b..0f3ee0f0a 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -1,858 +1,352 @@ + { - "gaTrackingId": "undefined", + "subcommons": [ + { + "URL": "https://tb.diseasedatahub.org/", + "name": "TB" + }, + { + "URL": "https://aids.diseasedatahub.org/", + "name": "AIDS" + }, + { + "URL": "https://flu.diseasedatahub.org/", + "name": "FLU" + }, + { + "URL": "https://microbiome.diseasedatahub.org/", + "name": "Microbiome" + } + ], + "gaTrackingId": "UA-119127212-1", "graphql": { "boardCounts": [ - { - "graphql": "_person_count", - "name": "Person", - "plural": "Persons" - }, { "graphql": "_subject_count", "name": "Subject", "plural": "Subjects" + }, + { + "graphql": "_study_count", + "name": "Study", + "plural": "Studies" + }, + { + "graphql": "_summary_lab_result_count", + "name": "Lab record", + "plural": "Lab records" } ], "chartCounts": [ - { - "graphql": "_person_count", - "name": "Person" - }, { "graphql": "_subject_count", "name": "Subject" + }, + { + "graphql": "_study_count", + "name": "Study" } ], "projectDetails": "boardCounts" }, "components": { - "appName": "Pediatric Cancer Data Commons Portal", + "appName": "Gen3 Disease Data Hub", "index": { "introduction": { - "heading": "Pediatric Cancer Data Commons", - "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", - "link": "/submission" + "heading": "Gen3 Disease Data Hub Datasets", + "text": "The Gen3 Disease Data Hub hosts data related to infectious diseases and aims to make data findable, accessible, interoperable, and reusable (FAIR).", + "link": "/datasets" }, "buttons": [ { - "name": "Define Data Field", - "icon": "data-field-define", - "body": "The Pediatric Cancer Data Commons defines the data. 
Please study the dictionary before you start browsing.", - "link": "/DD", - "label": "Learn more" + "name": "TB Environment", + "icon": "data-explore", + "body": "Explore TB data.", + "external_link": "https://tb.diseasedatahub.org" }, { - "name": "Explore Data", + "name": "AIDS Environment", "icon": "data-explore", - "body": "The Exploration Page gives you insights and a clear overview under selected factors.", - "link": "/explorer", - "label": "Explore data" + "body": "Explore AIDS data.", + "external_link": "https://aids.diseasedatahub.org" + }, + { + "name": "Flu Environment", + "icon": "data-explore", + "body": "Explore influenza data.", + "external_link": "https://flu.diseasedatahub.org" }, { - "name": "Access Data", - "icon": "data-access", - "body": "Use our selected tool to filter out the data you need.", - "link": "/query", - "label": "Query data" + "name": "Microbiome Environment", + "icon": "data-explore", + "body": "Explore data from a collection of open-access microbiome-related studies.", + "external_link": "https://microbiome.diseasedatahub.org" } - ], - "barChart": { - "showPercentage": true - } + ] }, "navigation": { "items": [ { - "icon": "dictionary", - "link": "/DD", + "icon": "query", + "link": "/datasets", "color": "#a2a2a2", - "name": "Dictionary" + "name": "Dataset Browser" }, { "icon": "exploration", "link": "/explorer", "color": "#a2a2a2", - "name": "Exploration" - }, - { - "icon": "query", - "link": "/query", - "color": "#a2a2a2", - "name": "Query" + "name": "Eco Explorer" } ] }, "topBar": { "items": [ { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/pcdc/", - "name": "About PCDC" - }, - { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/sponsors/", - "name": "Our Sponsors" + "link": "https://gen3.org/resources/user/", + "name": "Documentation" } ] }, "login": { - "title": "Pediatric Cancer Data Commons", - "subTitle": "Connect. Share. Cure.", - "text": "The Pediatric Cancer Data Commons (PCDC) harnesses pediatric cancer clinical data from around the globe into a single combined platform, connecting the data to other sources and making it available to clinicians and researchers everywhere. Headquartered at the University of Chicago, the PCDC team works with international leaders in pediatric cancers to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources. 
The PCDC currently houses the world's largest sets of clinical data for pediatric neuroblastoma and soft tissue sarcoma and is in the process of onboarding additional pediatric cancer disease groups.", + "title": "Gen3 Disease Data Hub", + "subTitle": "Cross Environment Datasets", + "text": "The website combines open access datasets from multiple disciplines to create clean, easy to navigate visualizations for data-driven discovery within the fields of allergy and infectious diseases.", "contact": "If you have any questions about access or the registration process, please contact ", - "email": "pcdc_help@lists.uchicago.edu" + "email": "support@datacommons.io" }, "footerLogos": [ { - "src": "/src/img/gen3.png", + "src": "/custom/sponsors/gitops-sponsors/gen3.png", "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons", - "height": 40 + "alt": "Gen3 Data Commons" }, { - "src": "/src/img/uchicago.png", - "href": "https://www.uchicago.edu/", - "alt": "The University of Chicago", - "height": 40 + "src": "/src/img/createdby.png", + "href": "https://ctds.uchicago.edu/", + "alt": "Center for Translational Data Science at the University of Chicago" } ] }, - "explorerConfig": [ - { - "id": 1, - "label": "data", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - }, - "consortium": { - "chartType": "bar", - "title": "Consortium" - } - }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "data_contributor_id", - "studies.study_id", - "studies.treatment_arm", - "sex", - "race", - "ethnicity", - "year_at_disease_phase", - "survival_characteristics.lkss_obfuscated", - "censor_status", - "age_at_censor_status", - "medical_histories.medical_history", - "medical_histories.medical_history_status", - "external_references.external_resource_name" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "histologies.histology_grade", - "histologies.histology_inpc", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.tumor_state", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.depth", - "tumor_assessments.tumor_size", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "tumor_assessments.necrosis", - "tumor_assessments.necrosis_pct", - "tumor_assessments.tumor_laterality", - "stagings.irs_group", - "stagings.tnm_finding", - "stagings.stage_system", - "stagings.stage", - "stagings.AB", - "stagings.E", - "stagings.S", - "disease_characteristics.mki", - "disease_characteristics.bulk_disease", - "disease_characteristics.BULK_MED_MASS", - "disease_characteristics.bulky_nodal_aggregate" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - "molecular_analysis.gene1", - "molecular_analysis.gene2", - "molecular_analysis.dna_index", - "molecular_analysis.age_at_molecular_analysis", - "molecular_analysis.mitoses", - 
"molecular_analysis.cytodifferentiation" - ] - }, - { - "title": "Surgery", - "fields": [ - "biopsy_surgical_procedures.tumor_classification", - "biopsy_surgical_procedures.procedure_type", - "biopsy_surgical_procedures.margins" - ] - }, - { - "title": "Radiation", - "fields": [ - "radiation_therapies.tumor_classification", - "radiation_therapies.energy_type", - "radiation_therapies.rt_dose" - ] - }, - { - "title": "Response", - "fields": [ - "subject_responses.tx_prior_response", - "subject_responses.response", - "subject_responses.interim_response", - "subject_responses.response_method" - ] - }, - { - "title": "SMN", - "fields": [ - "secondary_malignant_neoplasm.age_at_smn", - "secondary_malignant_neoplasm.smn_site", - "secondary_malignant_neoplasm.smn_type", - "secondary_malignant_neoplasm.smn_morph_icdo" - ] - }, - { - "title": "Imaging", - "fields": [ - "imagings.imaging_method", - "imagings.imaging_result" - ] - }, - { - "title": "Labs", - "fields": [ - "labs.lab_test", - "labs.lab_result", - "labs.lab_result_numeric", - "labs.lab_result_unit" - ] - }, - { - "title": "SCT", - "fields": [ - "stem_cell_transplants.sct_type" - ] - } + "requiredCerts": [], + "featureFlags": { + "explorer": true, + "analysis": true + }, + "datasetBrowserConfig": { + "filterSections": [ + { + "title": "Supported Data Resources", + "options": [ + { "text": "TB", "filterType": "singleSelect"}, + { "text": "AIDS", "filterType": "singleSelect"}, + { "text": "Flu", "filterType": "singleSelect"}, + { "text": "Microbiome", "filterType": "singleSelect"} ] }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": true, - "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" + { + "title": "Research Focus", + "options": [ + { "text": "AIDS", "filterType": "singleSelect"}, + { "text": "TB", "filterType": "singleSelect"}, + { "text": "Flu", "filterType": "singleSelect"}, + { "text": "Immune Response", "filterType": "singleSelect"}, + { "text": "Immune Phenotype", "filterType": "singleSelect"}, + { "text": "Allergy", "filterType": "singleSelect"}, + { "text": "Atopy", "filterType": "singleSelect"}, + { "text": "Infection Response", "filterType": "singleSelect"}, + { "text": "Vaccine Response", "filterType": "singleSelect"}, + { "text": "Transplantation", "filterType": "singleSelect"}, + { "text": "Oncology", "filterType": "singleSelect"}, + { "text": "Autoimmune", "filterType": "singleSelect"}, + { "text": "Preterm Birth", "filterType": "singleSelect"} ] + } + ], + "fieldMapping" : [ + { "field": "link", "name": "View" }, + { "field": "dataset_name", "name": "Study" }, + { "field": "supported_data_resource", "name": "Supported Data Resource" }, + { "field": "research_focus", "name": "Research Focus" }, + { "field": "description", "name": "Description of Dataset" } + ], + "filterConfig": { + "tabs": [{ + "title": "Filters", + "fields": ["supported_data_resource", "research_focus"] + }] + } + }, + "dataExplorerConfig": { + "charts": { + 
"project_id": { + "chartType": "count", + "title": "Projects" }, - "patientIds": { - "filter": false, - "export": true + "subject_id": { + "chartType": "count", + "title": "Subjects" }, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } + "dataset": { + "chartType": "pie", + "title": "Resources", + "chartRow": 0 }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "data_contributor_id", - "name": "Data Contributor", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.study_id", - "name": "Study Id", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.treatment_arm", - "name": "Treatment Arm", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "year_at_disease_phase", - "name": "Year at Initial Diagnosis" - }, - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "survival_characteristics.lkss_obfuscated", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "medical_histories.medical_history", - "name": "Medical History" - }, - { - "field": "medical_histories.medical_history_status", - "name": "Medical History Status" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "INPC Classification" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_state", - "name": "Tumor State" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.depth", - "name": "Tumor Depth" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": "tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": "tumor_assessments.necrosis", - "name": "Necrosis" - }, - { - "field": "tumor_assessments.necrosis_pct", - "name": "Necrosis PCT" - }, - { - "field": "tumor_assessments.tumor_laterality", - "name": "Tumor Laterality" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - }, - { - "field": "stagings.stage_system", - "name": "Stage System" - }, - { - "field": "stagings.stage", - "name": "Stage" - }, - { - "field": "stagings.AB", - "name": "Ann Arbor AB" - }, - { - "field": "stagings.E", - "name": "Ann Arbor E" - }, - { - "field": "stagings.S", - "name": "Ann Arbor S" - }, - { - "field": "disease_characteristics.mki", - "name": "MKI" - }, - { - "field": 
"disease_characteristics.bulk_disease", - "name": "Bulky Disease" - }, - { - "field": "disease_characteristics.BULK_MED_MASS", - "name": "Bulky Mediastinal Mass" - }, - { - "field": "disease_characteristics.bulky_nodal_aggregate", - "name": "Bulky Nodal Aggregate" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "molecular_analysis.dna_index", - "name": "DNA Index" - }, - { - "field": "molecular_analysis.age_at_molecular_analysis", - "name": "Age at Molecular Analysis" - }, - { - "field": "molecular_analysis.mitoses", - "name": "Mitoses" - }, - { - "field": "molecular_analysis.cytodifferentiation", - "name": "Cytodifferentiation" - }, - { - "field": "biopsy_surgical_procedures.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "biopsy_surgical_procedures.procedure_type", - "name": "Procedure Type" - }, - { - "field": "biopsy_surgical_procedures.procedure_site", - "name": "Procedure Site" - }, - { - "field": "biopsy_surgical_procedures.margins", - "name": "Margins" - }, - { - "field": "radiation_therapies.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "radiation_therapies.age_at_rt_start", - "name": "Age at Radiation Therapy" - }, - { - "field": "radiation_therapies.rt_site", - "name": "Radiation Site" - }, - { - "field": "radiation_therapies.energy_type", - "name": "Energy Type" - }, - { - "field": "radiation_therapies.rt_dose", - "name": "Radiation Dose" - }, - { - "field": "radiation_therapies.rt_unit", - "name": "Radiation Unit" - }, - { - "field": "subject_responses.age_at_response", - "name": "Age at Response" - }, - { - "field": "subject_responses.tx_prior_response", - "name": "Treatment Prior Response" - }, - { - "field": "subject_responses.response", - "name": "Response" - }, - { - "field": "subject_responses.interim_response", - "name": "Interim Response" - }, - { - "field": "subject_responses.response_method", - "name": "Response Method" - }, - { - "field": "subject_responses.necrosis", - "name": "Necrosis" - }, - { - "field": "secondary_malignant_neoplasm.age_at_smn", - "name": "Age at SMN" - }, - { - "field": "secondary_malignant_neoplasm.smn_site", - "name": "SMN Site" - }, - { - "field": "secondary_malignant_neoplasm.smn_type", - "name": "SMN Type" - }, - { - "field": "secondary_malignant_neoplasm.smn_morph_icdo", - "name": "ICD-O Morphology" - }, - { - "field": "imagings.imaging_method", - "name": "Imaging Method" - }, - { - "field": "imagings.imaging_result", - "name": "Imaging Result" - }, - { - "field": "labs.lab_result_numeric", - "name": "Numeric Lab Result" - }, - { - "field": "labs.lab_result_unit", - "name": "Lab Result Unit" - }, - { - "field": "labs.lab_result", - "name": "Lab Result" - }, - { - "field": "labs.lab_test", - "name": "Lab Test" - }, - { - "field": "stem_cell_transplants.sct_type", - "name": "SCT Type" - } - ] + "data_format": { + "chartType": "bar", + "title": "Data Format", + "chartRow": 0 }, - "dataRequests": { - "enabled": false + "data_type": { + "chartType": "pie", + "title": "Data Type", + "chartRow": 0 }, - 
"getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + "experimental_strategies": { + "chartType": "bar", + "title": "Experimental Strategies", + "chartRow": 0 + }, + "species": { + "chartType": "bar", + "title": "Genus species", + "chartRow": 0 + }, + "gender": { + "chartType": "pie", + "title": "Gender", + "chartRow": 1 + }, + "race": { + "chartType": "pie", + "title": "Race", + "chartRow": 1 + }, + "ethnicity": { + "chartType": "pie", + "title": "Ethnicity", + "chartRow": 1 + }, + "biospecimen_anatomic_site": { + "chartType": "pie", + "title": "Biospecimen Anatomic Site", + "chartRow": 1 + } }, - { - "id": 2, - "label": "data - survival", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - } + "fieldMapping" : [ + { "field": "dataset", "name": "Resource" }, + { "field": "studyAccession", "name": "Study" }, + { "field": "phenotype", "name": "Phenotype" }, + { "field": "gender", "name": "Gender" }, + { "field": "ethnicity", "name": "Ethnicity" }, + { "field": "strain", "name": "Strain" }, + { "field": "species", "name": "Genus species" }, + { "field": "submitter_id", "name": "Submitter ID" }, + { "field": "race", "name": "Race" }, + { "field": "hiv_status", "name": "HIV Status" }, + { "field": "study_submitter_id", "name": "Study"}, + { "field": "frstdthd", "name": "Year of Death" }, + { "field": "arthxbase", "name": "ART Use Prior to Baseline"}, + { "field": "bshbvstat", "name": "Baseline HBV Sero-status"}, + { "field": "bshcvstat", "name": "Baseline HCV Sero-status"}, + { "field": "cd4nadir", "name": "CD4 Nadir Prior to HAART"}, + { "field": "status", "name": "Summarized HIV Sero-status"}, + {"field": "project_id", "name": "Project ID"}, + {"field": "frstcncrd", "name": "First Confirmed Cancer Year"}, + {"field": "frstdmd", "name": "First Visit Year with Diabetes"}, + {"field": "frstdmmd", "name": "First Visit Year with All Necessary Components to Determine Diabetes"}, + {"field": "frsthtnd", "name": "First Visit Year with Hypertension"}, + {"field": "frsthtnmd", "name": "First Visit Year with All Necessary Components to Determine Hypertension"}, + {"field": "fcd4lowd", "name": "First Year Seen CD4N < 200 or CD4% < 14"}, + {"field": "fposdate", "name": "First Year Seen Seropositive"}, + {"field": "frstaidd", "name": "First Reported AIDS Year"}, + {"field": "lastafrd", "name": "Last Reported AIDS Free Year"}, + {"field": "lastcond", "name": "Year of Last Study Visit Attended"}, + {"field": "lastcontact", "name": "Last Year of Contact"}, + {"field": "lcd4higd", "name": "Last Year Seen with CD4N >= 200 and CD4% >= 14"}, + {"field": "lnegdate", "name": "Last Year Seen Seronegative"}, + {"field": "amikacin_res_phenotype", "name": "Amikacin Phenotype" }, + {"field": "capreomycin_res_phenotype", "name": "Capreomycin Phenotype" }, + {"field": "isoniazid_res_phenotype", "name": "Isoniazid Phenotype" }, + {"field": "kanamycin_res_phenotype", "name": "Kanamycin Phenotype" }, + {"field": "ofloxacin_res_phenotype", "name": "Ofloxacin Phenotype" }, + {"field": "pyrazinamide_res_phenotype", "name": "Pyrazinamide Phenotype" }, + {"field": "rifampicin_res_phenotype", "name": "Rifampicin Phenotype" }, + {"field": "rifampin_res_phenotype", "name": "Rifampin Phenotype" }, + {"field": "streptomycin_res_phenotype", "name": "streptomycin Phenotype" } + ], + "filterConfig": { + "tabs": [{ + "title": "Resource", + "fields": ["dataset", "data_format", 
"data_type"] }, - "adminAppliedPreFilters": { - "consortium": { - "selectedValues": ["INSTRuCT"] - } + { + "title": "Subject", + "fields": ["ethnicity", "gender", "species", "race"] }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular"], - "tooltip": "You can describe this filter here" - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "stagings.irs_group", - "stagings.tnm_finding" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - "molecular_analysis.gene1", - "molecular_analysis.gene2" - ] - } + { + "title": "Diagnosis", + "fields": [ + "arthxbase", + "bshbvstat", + "bshcvstat", + "cd4nadir", + "status", + "hiv_status" ] }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": true, + { + "title": "Comorbidity", "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" + "frstcncrd", + "frstdmd", + "frstdmmd", + "frsthtnd", + "frsthtnmd" ] - }, - "patientIds": { - "filter": false, - "export": true - }, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } - }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)", - "tooltip": "test tooltip" - }, - { - "field": "survival_characteristics.age_at_lkss", - "name": "Age at LKSS" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": 
"tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "Histology Inpc" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "project_id", - "name": "Data Release Version" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - } + }, { + "title": "HIV History", + "fields": [ + "cd4nadir", + "fcd4lowd", + "fposdate", + "frstaidd", + "lastafrd", + "lastcond", + "lastcontact", + "lcd4higd", + "lnegdate", + "status" ] }, - "dataRequests": { - "enabled": false + { + "title": "Drug Resistance", + "fields": [ + "amikacin_res_phenotype", + "capreomycin_res_phenotype", + "isoniazid_res_phenotype", + "kanamycin_res_phenotype", + "ofloxacin_res_phenotype", + "pyrazinamide_res_phenotype", + "rifampicin_res_phenotype", + "rifampin_res_phenotype", + "streptomycin_res_phenotype" + ] }, - "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + { + "title": "Experiment", + "fields": [ + "experimental_strategies", + "virus_type", + "virus_subtype", + "analyte_type", + "biospecimen_anatomic_site", + "cell_line", + "sample_type", + "composition", + "strain" + ] + }] } - ] -} \ No newline at end of file + } +} diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index 162d8ca44..dc7a9e2b7 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -209,6 +209,10 @@ spec: # S3 bucket name for data upload, for setting up CSP #GEN3_DATA_UPLOAD_BUCKET|-value: ""-| # - name: BASENAME + {{- with .Values.gearboxS3Bucket }} + - name: GEARBOX_S3_BUCKET + value: {{ . 
}} + {{- end }} volumeMounts: {{- if .Values.extraImages }} - name: extra-images diff --git a/helm/portal/values.yaml b/helm/portal/values.yaml index 7e0c6723c..d697bb551 100644 --- a/helm/portal/values.yaml +++ b/helm/portal/values.yaml @@ -208,7 +208,6 @@ datadogTraceSampleRate: 1 extraImages: # - url: https://raw.githubusercontent.com/uc-cdis/gen3-helm/master/docs/images/gen3-blue-dark.png - # -- (map) GitOps configuration for portal gitops: # -- (string) multiline string - gitops.json diff --git a/helm/revproxy/gen3.nginx.conf/gearbox-middleware-service.conf b/helm/revproxy/gen3.nginx.conf/gearbox-middleware-service.conf new file mode 100644 index 000000000..28c6e8f60 --- /dev/null +++ b/helm/revproxy/gen3.nginx.conf/gearbox-middleware-service.conf @@ -0,0 +1,41 @@ + location /gearbox-middleware/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + set $proxy_service "gearbox-middleware-service"; + set $upstream http://gearbox-middleware-service$des_domain; + rewrite ^/gearbox-middleware/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/gearbox-middleware/; + client_max_body_size 0; + } + + location /gearbox-middleware-admin/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + set $authz_resource "/gearbox_gateway"; + set $authz_method "access"; + set $authz_service "gearbox_gateway"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $gearbox_middleware_password "Basic ${gearbox_middleware_b64}"; + + # For testing: + #add_header Set-Cookie "X-Frickjack=${gearbox_middleware_password};Path=/;Max-Age=600"; + set $proxy_service "gearbox-middleware-service"; + set $upstream http://gearbox-middleware-service$des_domain; + rewrite ^/gearbox-middleware-admin/(.*) /$1 break; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For "$realip"; + proxy_set_header X-UserId "$userid"; + proxy_set_header X-SessionId "$session_id"; + proxy_set_header X-VisitorId "$visitor_id"; + proxy_set_header Authorization "$gearbox_middleware_password"; + + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/gearbox-middleware-admin/; + client_max_body_size 0; + } diff --git a/helm/revproxy/gen3.nginx.conf/gearbox-service.conf b/helm/revproxy/gen3.nginx.conf/gearbox-service.conf new file mode 100644 index 000000000..e31d55b96 --- /dev/null +++ b/helm/revproxy/gen3.nginx.conf/gearbox-service.conf @@ -0,0 +1,41 @@ + location /gearbox/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + set $proxy_service "gearbox-service"; + set $upstream http://gearbox-service$des_domain; + rewrite ^/gearbox/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/gearbox/; + client_max_body_size 0; + } + + location /gearbox-admin/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + set $authz_resource "/gearbox_gateway"; + set $authz_method "access"; + set $authz_service "gearbox_gateway"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $gearbox_password "Basic ${gearbox_b64}"; + + # For testing: + #add_header Set-Cookie "X-Frickjack=${gearbox_password};Path=/;Max-Age=600"; + set $proxy_service "gearbox-service"; + set $upstream http://gearbox-service$des_domain; + rewrite ^/gearbox-admin/(.*) /$1 break; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For "$realip"; + proxy_set_header X-UserId "$userid"; + proxy_set_header X-SessionId 
"$session_id"; + proxy_set_header X-VisitorId "$visitor_id"; + proxy_set_header Authorization "$gearbox_password"; + + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/gearbox-admin/; + client_max_body_size 0; + } diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index 4be081dd5..2f6426e95 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -19,6 +19,8 @@ env DES_NAMESPACE; env MAINTENANCE_MODE; env INDEXD_AUTHZ; env MDS_AUTHZ; +env GEARBOX_AUTHZ; +env GEARBOX_MIDDLEWARE_AUTHZ; env FRONTEND_ROOT; env DOCUMENT_URL; @@ -143,7 +145,10 @@ http { perl_set $indexd_b64 'sub { $_ = $ENV{"INDEXD_AUTHZ"}; chomp; return "$_"; }'; # # metadata service password for admin endpoint perl_set $mds_b64 'sub { $_ = $ENV{"MDS_AUTHZ"}; chomp; return "$_"; }'; - + # gearbox service password for admin endpoint + perl_set $gearbox_b64 'sub { $_ = $ENV{"GEARBOX_AUTHZ"}; chomp; return "$_"; }'; + #gearbox-middleware service password for admin endpoint + perl_set $gearbox_middleware_b64 'sub { $_ = $ENV{"GEARBOX_MIDDLEWARE_AUTHZ"}; chomp; return "$_"; }'; server { listen 6567; @@ -177,6 +182,11 @@ http { server { listen 80; + + location /login { + try_files $uri /index.html; + } + server_tokens off; proxy_hide_header server; proxy_hide_header X-Powered-By; diff --git a/helm/revproxy/templates/deployment.yaml b/helm/revproxy/templates/deployment.yaml index 7be59ba44..9ab408997 100644 --- a/helm/revproxy/templates/deployment.yaml +++ b/helm/revproxy/templates/deployment.yaml @@ -132,6 +132,18 @@ spec: name: metadata-g3auto key: base64Authz.txt optional: true + - name: GEARBOX_AUTHZ + valueFrom: + secretKeyRef: + name: gearbox-g3auto + key: base64Authz.txt + optional: true + - name: GEARBOX_MIDDLEWARE_AUTHZ + valueFrom: + secretKeyRef: + name: gearbox-middleware-g3auto + key: base64Authz.txt + optional: true volumeMounts: - name: "revproxy-conf" readOnly: true diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml new file mode 100644 index 000000000..5897815b3 --- /dev/null +++ b/pcdc-default-values.yaml @@ -0,0 +1,2057 @@ +global: + dev: true + hostname: localhost + portalApp: pcdc + dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + authz_entity_name: subject + tls: + cert: | + -----BEGIN CERTIFICATE----- + MIIDDTCCAfWgAwIBAgIQcMmHCSPIuchREDNi1OpQ5DANBgkqhkiG9w0BAQsFADAP + MQ0wCwYDVQQDEwRnZW4zMB4XDTI0MDMyNTIyMDgwNFoXDTI1MDMyNTIyMDgwNFow + FDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB + CgKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u6bgbztSg + 9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0bhfGlwmt/ + gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lHzPefEQoU + p4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e8rRg5KWA + N7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KKriN+7492 + 38Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABo2AwXjAOBgNVHQ8BAf8EBAMCBaAw + HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHwYD + VR0jBBgwFoAUIK7MtOCIs/DygzZ1/vR3ieAwplAwDQYJKoZIhvcNAQELBQADggEB + AIWgFxpLfZ9LUc90qdiC2yHaLKyTN/LMdkUjw82ULVLYJ0ot0JJaJvO4iDFe/Ra9 + t13LUhcE+W4ChentUHvwYKO5zghf4UtiryM8/wqmcZ03xyswdVaKuk5Ov39/9sTJ + 6rfzMpf3mJZDO6JYC475TCQQ3hKAUUzOiFg41CMeqAy9vn0zgBk58IzZmruvdn43 + YH6N/ooqVTj3CnkmVkWoB4zBjDzX9DuxpYvqI3seD7qLtXK2cm2X+Pqv90UoPsB/ + XegALjODFpTbN5Scvbpb3npXEKbvR7X9+xy7BbVYD2K0FQ9+S1UTU8Rz7Dh9SDHM + Ixy5W9o6gVFhB5mxceOxKNc= + -----END CERTIFICATE----- + key: | + -----BEGIN RSA PRIVATE KEY----- + 
MIIEogIBAAKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u + 6bgbztSg9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0b + hfGlwmt/gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lH + zPefEQoUp4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e + 8rRg5KWAN7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KK + riN+749238Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABAoIBAG+AhfWcZncSuHjE + 1SfDCRRfeBtexqS6ygrCAn9UPDfRFWW1tfkuwP00kQekLlKCT03H9dvDPYmDIkvN + 1v23dkxjwn3qY5n4lbT9L2FXky6m1zfCgLEKzY5GcHA85QwVTPKYhw6NMTPwRJ2T + 4uDeJQKVih9fkN4Qoua2TnXvmyzNU49ffgFMJ0Ec7vNmS7MCUtlGec1Y0xKgTflt + yqhChpG2MBxdX8tLNgSC+lgRBZSzRaP/0oGZuV3FQ7W4fuXLNN8CdhSzHbVlbK+D + CO1f6nqZ8PZKJ/7SGwB2Q05EqscNAF3tl1dsGpnLqOLpnqJ2+f/H4W6/uB2tAILd + ySaC53kCgYEAwOHrNulo7HLgSXEs4/Ta9nLPoZxpDxkNKCRENDJCsoakQ2O33aL4 + mrHdSmxGqecpoCvkifx/ZCN073ZykVIoSY70+N7f79EytGjkAzDxEAnYMKSU7SSJ + TGA+c8Juqh6uvbMuJy/ZiQE6DZsweqhxopov7xSd89RIvNaBZdXq3QcCgYEA1fWJ + VHCEeQGl6eMtSWOfiADUnmOG70xlvmpzlD18pTTCIF7V1rFaAXjJl0ldI3mASJy/ + usiHZq54bUWcvof8DjI7YJ0OS8e7pmUZK9+O9fGTLIf8TIz6qq0PfERk+SyWGdAo + Z8HQMJBKWX809KPkJ9isd62wfREHVazfljxdL3sCgYBwxKTsWdKKSy9uQMjqDcHm + zIEwD24s8YyLp4hoq+nqzmVDMQ3SevG2H78tP9ighRIFHyRiuEkSlthLGIkrBUmg + mAAJcOSkJT7r01dbtkV6BwdqiQ65Bt9u0+Yvb8GbnIy1RAj7yDH6s8jpI45YaBrn + 4hWcRgWDBN3x6ceFbmf+CQKBgA5vwNJnvSiFCfLcF0Qqaqs8kxwUzxf6aasNd7r6 + 4xwqkSap/3e7A72xrrh8hMJOAm/j07QAr9In14xX9BmPB1zV2tfMARjv4yN5Ete4 + /+ZsZGfOzSFFKey2PKM/4ihF7+LR/sfxdeCw+7NKOAKBxHVD029H0u69ZWdMgNGc + RRVdAoGAFH7huA61ylOGh/W6IMU0wvJqg9SeT53JoZTr++0MS+0sdYoRGrq4RzSZ + bXKuvqZaSrXMRB9JZ72FfpjwZhDPZtNOXJV00K4yjZIui6h+TPsDk4lnxVSPYMpP + My/zrtJTCPM+Gqa6mhYTz4fyITv7igxqyECakrCa/Ct0SVDZbSI= + -----END RSA PRIVATE KEY----- + +arborist: + image: + repository: quay.io/pcdc/arborist + tag: 2023.08 + +amanuensis: + image: + repository: quay.io/pcdc/amanuensis + tag: "2.16.1" + +fence: + FENCE_CONFIG: + DEBUG: true + MOCK_STORAGE: true + #fill in + AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' + MOCK_GOOGLE_AUTH: true + mock_default_user: 'test@example.com' + + + image: + repository: quay.io/pcdc/fence + tag: "helm-test" + pullPolicy: Always + USER_YAML: | + cloud_providers: {} + groups: {} + authz: + # policies automatically given to anyone, even if they haven't authenticated + anonymous_policies: ['open_data_reader', 'full_open_access'] + + # policies automatically given to authenticated users (in addition to their other + # policies) + all_users_policies: ['open_data_reader', 'authn_open_access'] + + user_project_to_resource: + QA: /programs/QA + DEV: /programs/DEV + test: /programs/QA/projects/test + jenkins: /programs/jnkns/projects/jenkins + jenkins2: /programs/jnkns/projects/jenkins2 + jnkns: /programs/jnkns + + policies: + # General Access + - id: 'workspace' + description: 'be able to use workspace' + resource_paths: ['/workspace'] + role_ids: ['workspace_user'] + - id: 'dashboard' + description: 'be able to use the commons dashboard' + resource_paths: ['/dashboard'] + role_ids: ['dashboard_user'] + - id: 'prometheus' + description: 'be able to use prometheus' + resource_paths: ['/prometheus'] + role_ids: ['prometheus_user'] + - id: 'ttyadmin' + description: 'be able to use the admin tty' + resource_paths: ['/ttyadmin'] + role_ids: ['ttyadmin_user'] + - id: 'mds_admin' + description: 'be able to use metadata service' + resource_paths: ['/mds_gateway'] + role_ids: ['mds_user'] + - id: 'data_upload' + description: 'upload raw data files to S3' + role_ids: ['file_uploader'] + resource_paths: ['/data_file'] + - description: be 
able to use sower job + id: sower + resource_paths: [/sower] + role_ids: [sower_user] + - id: 'mariner_admin' + description: 'full access to mariner API' + resource_paths: ['/mariner'] + role_ids: ['mariner_admin'] + - id: audit_reader + role_ids: + - audit_reader + resource_paths: + - /services/audit + - id: audit_login_reader + role_ids: + - audit_reader + resource_paths: + - /services/audit/login + - id: audit_presigned_url_reader + role_ids: + - audit_reader + resource_paths: + - /services/audit/presigned_url + - id: requestor_admin + role_ids: + - requestor_admin + resource_paths: + - /programs + - id: requestor_reader + role_ids: + - requestor_reader + resource_paths: + - /programs + - id: requestor_creator + role_ids: + - requestor_creator + resource_paths: + - /programs + - id: requestor_updater + role_ids: + - requestor_updater + resource_paths: + - /programs + - id: requestor_deleter + role_ids: + - requestor_deleter + resource_paths: + - /programs + # Data Access + + # All programs policy + - id: 'all_programs_reader' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: ['/programs'] + + # # example if need access to write to storage + # - id: 'programs.jnkns-storage_writer' + # description: '' + # role_ids: + # - 'storage_writer' + # resource_paths: ['/programs/jnkns'] + + - id: 'programs.jnkns-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/programs/jnkns' + - '/gen3/programs/jnkns' + + - id: 'programs.jnkns-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/jnkns' + - '/gen3/programs/jnkns' + + + - id: 'programs.QA-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/programs/QA' + - '/gen3/programs/QA' + + - id: 'programs.QA-admin-no-storage' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + resource_paths: + - '/programs/QA' + - '/gen3/programs/QA' + + - id: 'programs.QA-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/QA' + - '/gen3/programs/QA' + + - id: 'programs.DEV-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + - 'storage_writer' + resource_paths: + - '/programs/DEV' + - '/gen3/programs/DEV' + + - id: 'programs.DEV-storage_writer' + description: '' + role_ids: + - 'storage_writer' + resource_paths: ['/programs/DEV'] + + - id: 'programs.DEV-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/DEV' + - '/gen3/programs/DEV' + + - id: 'programs.test-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/programs/test' + - '/gen3/programs/test' + + - id: 'programs.test-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/programs/test' + - '/gen3/programs/test' + + - id: 'abc-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/abc' + + - id: 'gen3-admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_reader' + resource_paths: + - '/gen3' + + - id: 'gen3-hmb-researcher' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 
'storage_reader' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + - '/gen3' + + - id: 'abc.programs.test_program.projects.test_project1-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/abc/programs/test_program/projects/test_project1' + + - id: 'abc.programs.test_program.projects.test_project2-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/abc/programs/test_program/projects/test_project2' + + - id: 'abc.programs.test_program2.projects.test_project3-viewer' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: + - '/abc/programs/test_program2/projects/test_project3' + + # Open data policies + - id: 'authn_open_access' + resource_paths: ['/programs/open/projects/authnRequired'] + description: '' + role_ids: + - 'reader' + - 'storage_reader' + - id: 'full_open_access' + resource_paths: ['/programs/open/projects/1000G'] + description: '' + role_ids: + - 'reader' + - 'storage_reader' + - id: 'open_data_reader' + description: '' + role_ids: + - 'reader' + - 'storage_reader' + resource_paths: ['/open'] + - id: 'open_data_admin' + description: '' + role_ids: + - 'creator' + - 'reader' + - 'updater' + - 'deleter' + - 'storage_writer' + - 'storage_reader' + resource_paths: ['/open'] + + # Consent Code Policies + - id: 'not-for-profit-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NPU' + + - id: 'publication-required-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/PUB' + + - id: 'gru-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + + - id: 'gru-cc-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + + - id: 'hmb-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + + - id: 'poa-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/POA' + + - id: 'ds-lung-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + - '/consents/DS_LungDisease' + + - id: 'ds-chronic-obstructive-pulmonary-disease-researcher' + description: '' + role_ids: + - 'admin' + resource_paths: + - '/consents/NRES' + - '/consents/GRU' + - '/consents/GRU_CC' + - '/consents/HMB' + - '/consents/DS_ChronicObstructivePulmonaryDisease' + + - id: 'services.sheepdog-admin' + description: 'CRUD access to programs and projects' + role_ids: + - 'sheepdog_admin' + resource_paths: + - '/services/sheepdog/submission/program' + - '/services/sheepdog/submission/project' + + # indexd + - id: 'indexd_admin' + description: 'full access to indexd API' + role_ids: + - 'indexd_admin' + resource_paths: + - '/programs' + - '/services/indexd/admin' + # # TODO resource path '/' is not valid right now in arborist, trying to decide + # # how to handle all resources + # - id: 'indexd_admin' + # description: '' + # role_ids: + # - 'indexd_record_creator' + # - 'indexd_record_reader' + # - 'indexd_record_updater' + # - 'indexd_delete_record' + # - 'indexd_storage_reader' + # - 'indexd_storage_writer' + # resource_paths: ['/'] + # - id: 'indexd_record_reader' + # 
description: '' + # role_ids: + # - 'indexd_record_reader' + # resource_paths: ['/'] + # - id: 'indexd_record_editor' + # description: '' + # role_ids: + # - 'indexd_record_creator' + # - 'indexd_record_reader' + # - 'indexd_record_updater' + # - 'indexd_delete_record' + # resource_paths: ['/'] + # - id: 'indexd_storage_reader' + # description: '' + # role_ids: + # - 'indexd_storage_reader' + # resource_paths: ['/'] + # - id: 'indexd_storage_editor' + # description: '' + # role_ids: + # - 'indexd_storage_reader' + # - 'indexd_storage_writer' + # resource_paths: ['/'] + + # argo + - id: argo + description: be able to use argo + resource_paths: [/argo] + role_ids: [argo_user] + + #PCDC specific + - id: 'services.amanuensis-admin' + description: 'admin access to amanuensis' + role_ids: + - 'amanuensis_admin' + resource_paths: + - '/services/amanuensis' + - id: analysis + description: be able to use analysis tool service + resource_paths: + - /analysis + role_ids: + - analysis_user + - id: privacy_policy + description: User agreed on the privacy policy + resource_paths: + - /privacy + role_ids: + - reader + - id: login_no_access + role_ids: + - reader + resource_paths: + - /portal + - id: 'data_admin' + description: 'policy test, should write a policy per resource and assign to user in order to avoid duplicating policies' + role_ids: + - admin + resource_paths: + - /programs + - /programs/pcdc + resources: + # General Access + - name: 'data_file' + description: 'data files, stored in S3' + - name: 'dashboard' + description: 'commons /dashboard' + - name: 'mds_gateway' + description: 'commons /mds-admin' + - name: 'prometheus' + description: 'commons /prometheus and /grafana' + - name: 'ttyadmin' + description: 'commons /ttyadmin' + - name: 'workspace' + description: jupyter notebooks + - name: "sower" + description: 'sower resource' + - name: 'mariner' + description: 'workflow execution service' + - name: argo + #PCDC + - name: analysis + description: analysis tool service + - name: portal + description: data portal service + - name: privacy + description: User privacy policy + # OLD Data + - name: 'programs' + subresources: + #PCDC + - name: pcdc + - name: 'open' + subresources: + - name: 'projects' + subresources: + - name: '1000G' + - name: 'authnRequired' + - name: 'QA' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'DEV' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'jnkns' + subresources: + - name: 'projects' + subresources: + - name: 'jenkins' + - name: 'jenkins2' + - name: 'test' + subresources: + - name: 'projects' + subresources: + - name: 'test' + + # NEW Data WITH PREFIX + - name: 'gen3' + subresources: + - name: 'programs' + subresources: + - name: 'QA' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'DEV' + subresources: + - name: 'projects' + subresources: + - name: 'test' + - name: 'jnkns' + subresources: + - name: 'projects' + subresources: + - name: 'jenkins' + - name: 'jenkins2' + - name: 'test' + subresources: + - name: 'projects' + subresources: + - name: 'test' + + # consents obtained from DUO and NIH + # https://github.com/EBISPOT/DUO + # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4721915/ + - name: 'consents' + subresources: + - name: 'NRES' + description: 'no restriction' + - name: 'GRU' + description: 'general research use' + - name: 'GRU_CC' + description: 'general research use and clinical care' + - name: 'HMB' + description: 'health/medical/biomedical research' + - 
name: 'POA' + description: 'population origins or ancestry research' + - name: 'NMDS' + description: 'no general methods research' + - name: 'NPU' + description: 'not-for-profit use only' + - name: 'PUB' + description: 'publication required' + - name: 'DS_LungDisease' + description: 'disease-specific research for lung disease' + - name: 'DS_ChronicObstructivePulmonaryDisease' + description: 'disease-specific research for chronic obstructive pulmonary disease' + + - name: 'abc' + subresources: + - name: 'programs' + subresources: + - name: 'foo' + subresources: + - name: 'projects' + subresources: + - name: 'bar' + - name: 'test_program' + subresources: + - name: 'projects' + subresources: + - name: 'test_project1' + - name: 'test_project2' + - name: 'test_program2' + subresources: + - name: 'projects' + subresources: + - name: 'test_project3' + + + # "Sheepdog admin" resources + - name: 'services' + subresources: + - name: 'sheepdog' + subresources: + - name: 'submission' + subresources: + - name: 'program' + - name: 'project' + #PCDC + - name: 'amanuensis' + - name: 'indexd' + subresources: + - name: 'admin' + - name: 'bundles' + - name: audit + subresources: + - name: presigned_url + - name: login + + - name: 'open' + + # action/methods: + # create, read, update, delete, read-storage, write-storage, + # file_upload, access + roles: + # General Access + - id: 'file_uploader' + description: 'can upload data files' + permissions: + - id: 'file_upload' + action: + service: '*' + method: 'file_upload' + - id: 'workspace_user' + permissions: + - id: 'workspace_access' + action: + service: 'jupyterhub' + method: 'access' + - id: 'dashboard_user' + permissions: + - id: 'dashboard_access' + action: + service: 'dashboard' + method: 'access' + - id: 'mds_user' + permissions: + - id: 'mds_access' + action: + service: 'mds_gateway' + method: 'access' + - id: 'prometheus_user' + permissions: + - id: 'prometheus_access' + action: + service: 'prometheus' + method: 'access' + - id: 'ttyadmin_user' + permissions: + - id: 'ttyadmin_access' + action: + service: 'ttyadmin' + method: 'access' + - id: 'sower_user' + permissions: + - id: 'sower_access' + action: + service: 'job' + method: 'access' + - id: 'mariner_admin' + permissions: + - id: 'mariner_access' + action: + service: 'mariner' + method: 'access' + - id: 'audit_reader' + permissions: + - id: 'audit_reader_action' + action: + service: 'audit' + method: 'read' + - id: 'analysis_user' + permissions: + - action: {method: 'access', service: 'analysis'} + id: 'analysis_access' + # All services + - id: 'admin' + description: '' + permissions: + - id: 'admin' + action: + service: '*' + method: '*' + - id: 'creator' + description: '' + permissions: + - id: 'creator' + action: + service: '*' + method: 'create' + - id: 'reader' + description: '' + permissions: + - id: 'reader' + action: + service: '*' + method: 'read' + - id: 'updater' + description: '' + permissions: + - id: 'updater' + action: + service: '*' + method: 'update' + - id: 'deleter' + description: '' + permissions: + - id: 'deleter' + action: + service: '*' + method: 'delete' + - id: 'storage_writer' + description: '' + permissions: + - id: 'storage_writer' + action: + service: '*' + method: 'write-storage' + - id: 'storage_reader' + description: '' + permissions: + - id: 'storage_reader' + action: + service: '*' + method: 'read-storage' + + + # Sheepdog admin role + - id: 'sheepdog_admin' + description: 'sheepdog admin role for program project crud' + permissions: + - id: 
'sheepdog_admin_action' + action: + service: 'sheepdog' + method: '*' + + + # indexd + - id: 'indexd_admin' + # this only works if indexd.arborist is enabled in manifest! + description: 'full access to indexd API' + permissions: + - id: 'indexd_admin' + action: + service: 'indexd' + method: '*' + - id: 'indexd_record_creator' + description: '' + permissions: + - id: 'indexd_record_creator' + action: + service: 'indexd' + method: 'create' + - id: 'indexd_record_reader' + description: '' + permissions: + - id: 'indexd_record_reader' + action: + service: 'indexd' + method: 'read' + - id: 'indexd_record_updater' + description: '' + permissions: + - id: 'indexd_record_updater' + action: + service: 'indexd' + method: 'update' + - id: 'indexd_delete_record' + description: '' + permissions: + - id: 'indexd_delete_record' + action: + service: 'indexd' + method: 'delete' + - id: 'indexd_storage_reader' + description: '' + permissions: + - id: 'indexd_storage_reader' + action: + service: 'indexd' + method: 'read-storage' + - id: 'indexd_storage_writer' + description: '' + permissions: + - id: 'indexd_storage_writer' + action: + service: 'indexd' + method: 'write-storage' + + # arborist + - id: 'arborist_creator' + description: '' + permissions: + - id: 'arborist_creator' + action: + service: 'arborist' + method: 'create' + - id: 'arborist_reader' + description: '' + permissions: + - id: 'arborist_reader' + action: + service: 'arborist' + method: 'read' + - id: 'arborist_updater' + description: '' + permissions: + - id: 'arborist_updater' + action: + service: 'arborist' + method: 'update' + - id: 'arborist_deleter' + description: '' + permissions: + - id: 'arborist_deleter' + action: + service: 'arborist' + method: 'delete' + + # requestor + - id: requestor_admin + permissions: + - id: requestor_admin_action + action: + service: requestor + method: '*' + - id: requestor_reader + permissions: + - id: requestor_reader_action + action: + service: requestor + method: read + - id: requestor_creator + permissions: + - id: requestor_creator_action + action: + service: requestor + method: create + - id: requestor_updater + permissions: + - id: requestor_updater_action + action: + service: requestor + method: update + - id: requestor_deleter + permissions: + - id: requestor_deleter_action + action: + service: requestor + method: delete + # argo + - id: argo_user + permissions: + - id: argo_access + action: + service: argo + method: access + #PCDC specific + #amanuensis + - id: 'amanuensis_admin' + description: 'can do admin work on project/data request' + permissions: + - id: 'amanuensis_admin_action' + action: + service: 'amanuensis' + method: '*' + clients: + basic-test-client: + policies: + - abc-admin + - gen3-admin + basic-test-abc-client: + policies: + - abc-admin + wts: + policies: + - all_programs_reader + - workspace + + users: + ### BEGIN INTERNS SECTION ### + ### END INTERNS SECTION ### + qureshi@uchicago.edu: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, 
read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + pmurdoch@uchicago.edu: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - services.amanuensis-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + - data_admin + - analysis + - privacy_policy + - login_no_access + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + graglia01@gmail.com: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - services.amanuensis-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + - data_admin + - analysis + - privacy_policy + - login_no_access + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + furner.brian@gmail.com: + admin: true + policies: + - data_upload + - workspace + - dashboard + - mds_admin + - prometheus + - sower + - services.sheepdog-admin + - services.amanuensis-admin + - programs.QA-admin + - programs.test-admin + - programs.DEV-admin + - programs.jnkns-admin + - indexd_admin + - ttyadmin + - data_admin + - analysis + - privacy_policy + - login_no_access + projects: + - auth_id: QA + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: test + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: DEV + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jenkins2 + privilege: [create, read, update, delete, upload, read-storage] + - auth_id: jnkns + privilege: [create, read, update, delete, upload, read-storage] + +guppy: + enabled: true + image: + repository: quay.io/pcdc/guppy + tag: 1.8.0 + authFilterField: "auth_resource_path" + +manifestservice: + image: + repository: quay.io/cdis/manifestservice + tag: 2023.08 + +pcdcanalysistools: + image: + repository: quay.io/pcdc/pcdcanalysistools + tag: 1.8.4 + +peregrine: + image: + repository: quay.io/pcdc/peregrine + tag: "1.3.5" + +portal: + image: + repository: quay.io/pcdc/windmill + tag: 1.25.0 + resources: + requests: + cpu: 1.0 + gitops: + json: | + { + "gaTrackingId": "undefined", + "graphql": { + "boardCounts": [ + { + "graphql": "_person_count", + "name": "Person", + "plural": "Persons" + }, + { + 
"graphql": "_subject_count", + "name": "Subject", + "plural": "Subjects" + } + ], + "chartCounts": [ + { + "graphql": "_person_count", + "name": "Person" + }, + { + "graphql": "_subject_count", + "name": "Subject" + } + ], + "projectDetails": "boardCounts" + }, + "components": { + "appName": "Pediatric Cancer Data Commons Portal", + "index": { + "introduction": { + "heading": "Pediatric Cancer Data Commons", + "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", + "link": "/submission" + }, + "buttons": [ + { + "name": "Define Data Field", + "icon": "data-field-define", + "body": "The Pediatric Cancer Data Commons defines the data. Please study the dictionary before you start browsing.", + "link": "/DD", + "label": "Learn more" + }, + { + "name": "Explore Data", + "icon": "data-explore", + "body": "The Exploration Page gives you insights and a clear overview under selected factors.", + "link": "/explorer", + "label": "Explore data" + }, + { + "name": "Access Data", + "icon": "data-access", + "body": "Use our selected tool to filter out the data you need.", + "link": "/query", + "label": "Query data" + } + ], + "barChart": { + "showPercentage": true + } + }, + "navigation": { + "items": [ + { + "icon": "dictionary", + "link": "/DD", + "color": "#a2a2a2", + "name": "Dictionary" + }, + { + "icon": "exploration", + "link": "/explorer", + "color": "#a2a2a2", + "name": "Exploration" + }, + { + "icon": "query", + "link": "/query", + "color": "#a2a2a2", + "name": "Query" + } + ] + }, + "topBar": { + "items": [ + { + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/pcdc/", + "name": "About PCDC" + }, + { + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/sponsors/", + "name": "Our Sponsors" + } + ] + }, + "login": { + "title": "Pediatric Cancer Data Commons", + "subTitle": "Connect. Share. Cure.", + "text": "The Pediatric Cancer Data Commons (PCDC) harnesses pediatric cancer clinical data from around the globe into a single combined platform, connecting the data to other sources and making it available to clinicians and researchers everywhere. Headquartered at the University of Chicago, the PCDC team works with international leaders in pediatric cancers to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources. 
The PCDC currently houses the world's largest sets of clinical data for pediatric neuroblastoma and soft tissue sarcoma and is in the process of onboarding additional pediatric cancer disease groups.", + "contact": "If you have any questions about access or the registration process, please contact ", + "email": "pcdc_help@lists.uchicago.edu" + }, + "footerLogos": [ + { + "src": "/src/img/gen3.png", + "href": "https://ctds.uchicago.edu/gen3", + "alt": "Gen3 Data Commons", + "height": 40 + }, + { + "src": "/src/img/uchicago.png", + "href": "https://www.uchicago.edu/", + "alt": "The University of Chicago", + "height": 40 + } + ] + }, + "explorerConfig": [ + { + "id": 1, + "label": "data", + "charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + }, + "consortium": { + "chartType": "bar", + "title": "Consortium" + } + }, + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "data_contributor_id", + "studies.study_id", + "studies.treatment_arm", + "sex", + "race", + "ethnicity", + "year_at_disease_phase", + "survival_characteristics.lkss_obfuscated", + "censor_status", + "age_at_censor_status", + "medical_histories.medical_history", + "medical_histories.medical_history_status", + "external_references.external_resource_name" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "histologies.histology_grade", + "histologies.histology_inpc", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.tumor_state", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.depth", + "tumor_assessments.tumor_size", + "tumor_assessments.invasiveness", + "tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "tumor_assessments.necrosis", + "tumor_assessments.necrosis_pct", + "tumor_assessments.tumor_laterality", + "stagings.irs_group", + "stagings.tnm_finding", + "stagings.stage_system", + "stagings.stage", + "stagings.AB", + "stagings.E", + "stagings.S", + "disease_characteristics.mki", + "disease_characteristics.bulk_disease", + "disease_characteristics.BULK_MED_MASS", + "disease_characteristics.bulky_nodal_aggregate" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2", + "molecular_analysis.dna_index", + "molecular_analysis.age_at_molecular_analysis", + "molecular_analysis.mitoses", + "molecular_analysis.cytodifferentiation" + ] + }, + { + "title": "Surgery", + "fields": [ + "biopsy_surgical_procedures.tumor_classification", + "biopsy_surgical_procedures.procedure_type", + "biopsy_surgical_procedures.margins" + ] + }, + { + "title": "Radiation", + "fields": [ + "radiation_therapies.tumor_classification", + "radiation_therapies.energy_type", + "radiation_therapies.rt_dose" + ] + }, + { + "title": "Response", + "fields": [ + "subject_responses.tx_prior_response", + "subject_responses.response", + "subject_responses.interim_response", + 
"subject_responses.response_method" + ] + }, + { + "title": "SMN", + "fields": [ + "secondary_malignant_neoplasm.age_at_smn", + "secondary_malignant_neoplasm.smn_site", + "secondary_malignant_neoplasm.smn_type", + "secondary_malignant_neoplasm.smn_morph_icdo" + ] + }, + { + "title": "Imaging", + "fields": [ + "imagings.imaging_method", + "imagings.imaging_result" + ] + }, + { + "title": "Labs", + "fields": [ + "labs.lab_test", + "labs.lab_result", + "labs.lab_result_numeric", + "labs.lab_result_unit" + ] + }, + { + "title": "SCT", + "fields": [ + "stem_cell_transplants.sct_type" + ] + } + ] + }, + "projectId": "search", + "graphqlField": "subject", + "index": "", + "buttons": [ + { + "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data accessible to you" + } + ], + "table": { + "enabled": true, + "fields": [ + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + "patientIds": { + "filter": false, + "export": true + }, + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": true, + "survival": true + } + }, + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "data_contributor_id", + "name": "Data Contributor", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.study_id", + "name": "Study Id", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.treatment_arm", + "name": "Treatment Arm", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." 
+ }, + { + "field": "year_at_disease_phase", + "name": "Year at Initial Diagnosis" + }, + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "survival_characteristics.lkss_obfuscated", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "medical_histories.medical_history", + "name": "Medical History" + }, + { + "field": "medical_histories.medical_history_status", + "name": "Medical History Status" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + { + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": "INPC Classification" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_state", + "name": "Tumor State" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diameter Dimension 1" + }, + { + "field": "tumor_assessments.depth", + "name": "Tumor Depth" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "tumor_assessments.necrosis", + "name": "Necrosis" + }, + { + "field": "tumor_assessments.necrosis_pct", + "name": "Necrosis PCT" + }, + { + "field": "tumor_assessments.tumor_laterality", + "name": "Tumor Laterality" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + }, + { + "field": "stagings.stage_system", + "name": "Stage System" + }, + { + "field": "stagings.stage", + "name": "Stage" + }, + { + "field": "stagings.AB", + "name": "Ann Arbor AB" + }, + { + "field": "stagings.E", + "name": "Ann Arbor E" + }, + { + "field": "stagings.S", + "name": "Ann Arbor S" + }, + { + "field": "disease_characteristics.mki", + "name": "MKI" + }, + { + "field": "disease_characteristics.bulk_disease", + "name": "Bulky Disease" + }, + { + "field": "disease_characteristics.BULK_MED_MASS", + "name": "Bulky Mediastinal Mass" + }, + { + "field": "disease_characteristics.bulky_nodal_aggregate", + "name": "Bulky Nodal Aggregate" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": "molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "molecular_analysis.dna_index", + "name": "DNA Index" + }, + { + "field": "molecular_analysis.age_at_molecular_analysis", + "name": "Age at Molecular Analysis" + }, + { + "field": "molecular_analysis.mitoses", + "name": "Mitoses" + }, + { + "field": 
"molecular_analysis.cytodifferentiation", + "name": "Cytodifferentiation" + }, + { + "field": "biopsy_surgical_procedures.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "biopsy_surgical_procedures.procedure_type", + "name": "Procedure Type" + }, + { + "field": "biopsy_surgical_procedures.procedure_site", + "name": "Procedure Site" + }, + { + "field": "biopsy_surgical_procedures.margins", + "name": "Margins" + }, + { + "field": "radiation_therapies.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "radiation_therapies.age_at_rt_start", + "name": "Age at Radiation Therapy" + }, + { + "field": "radiation_therapies.rt_site", + "name": "Radiation Site" + }, + { + "field": "radiation_therapies.energy_type", + "name": "Energy Type" + }, + { + "field": "radiation_therapies.rt_dose", + "name": "Radiation Dose" + }, + { + "field": "radiation_therapies.rt_unit", + "name": "Radiation Unit" + }, + { + "field": "subject_responses.age_at_response", + "name": "Age at Response" + }, + { + "field": "subject_responses.tx_prior_response", + "name": "Treatment Prior Response" + }, + { + "field": "subject_responses.response", + "name": "Response" + }, + { + "field": "subject_responses.interim_response", + "name": "Interim Response" + }, + { + "field": "subject_responses.response_method", + "name": "Response Method" + }, + { + "field": "subject_responses.necrosis", + "name": "Necrosis" + }, + { + "field": "secondary_malignant_neoplasm.age_at_smn", + "name": "Age at SMN" + }, + { + "field": "secondary_malignant_neoplasm.smn_site", + "name": "SMN Site" + }, + { + "field": "secondary_malignant_neoplasm.smn_type", + "name": "SMN Type" + }, + { + "field": "secondary_malignant_neoplasm.smn_morph_icdo", + "name": "ICD-O Morphology" + }, + { + "field": "imagings.imaging_method", + "name": "Imaging Method" + }, + { + "field": "imagings.imaging_result", + "name": "Imaging Result" + }, + { + "field": "labs.lab_result_numeric", + "name": "Numeric Lab Result" + }, + { + "field": "labs.lab_result_unit", + "name": "Lab Result Unit" + }, + { + "field": "labs.lab_result", + "name": "Lab Result" + }, + { + "field": "labs.lab_test", + "name": "Lab Test" + }, + { + "field": "stem_cell_transplants.sct_type", + "name": "SCT Type" + } + ] + }, + "dataRequests": { + "enabled": false + }, + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + }, + { + "id": 2, + "label": "data - survival", + "charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + } + }, + "adminAppliedPreFilters": { + "consortium": { + "selectedValues": ["INSTRuCT"] + } + }, + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular"], + "tooltip": "You can describe this filter here" + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.invasiveness", + "tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "stagings.irs_group", + 
"stagings.tnm_finding" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2" + ] + } + ] + }, + "projectId": "search", + "graphqlField": "subject", + "index": "", + "buttons": [ + { + "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data accessible to you" + } + ], + "table": { + "enabled": true, + "fields": [ + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + "patientIds": { + "filter": false, + "export": true + }, + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": true, + "survival": true + } + }, + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)", + "tooltip": "test tooltip" + }, + { + "field": "survival_characteristics.age_at_lkss", + "name": "Age at LKSS" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diameter Dimension 1" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + { + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": "Histology Inpc" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": "molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "project_id", + "name": "Data Release Version" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + } + ] + }, + "dataRequests": { + "enabled": false + }, + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + } + ] + } + +revproxy: + image: + repository: quay.io/cdis/nginx + tag: 2023.09 + +sheepdog: 
+ image: + repository: quay.io/pcdc/sheepdog + tag: "1.5.6" + +sower: + image: + repository: quay.io/cdis/sower + tag: 2024.04 + +wts: + image: + repository: quay.io/cdis/workspace-token-service + tag: 2024.04 + +elasticsearch: + enabled: true + +######################################################################################## +# DISABLED SERVICES # +######################################################################################## +gearbox: + enabled: false + +gearbox-middleware: + enabled: false + +ambassador: + # -- (bool) Whether to deploy the ambassador subchart. + enabled: false + +argo-wrapper: + # -- (bool) Whether to deploy the argo-wrapper subchart. + enabled: false + +audit: + # -- (bool) Whether to deploy the audit subchart. + enabled: false + +aws-es-proxy: + enabled: false + +metadata: + # -- (bool) Whether to deploy the metadata subchart. + enabled: false + +pidgin: + # -- (bool) Whether to deploy the pidgin subchart. + enabled: false + +indexd: + enabled: false + +hatchery: + enabled: false \ No newline at end of file diff --git a/pcdc_data/load_elasticsearch.sh b/pcdc_data/load_elasticsearch.sh index 16ff71b65..9a8f56939 100755 --- a/pcdc_data/load_elasticsearch.sh +++ b/pcdc_data/load_elasticsearch.sh @@ -76,7 +76,7 @@ EOF yq eval '. * load("./temp.yaml")' secret-values.yaml > updated-secret-values.yaml && mv updated-secret-values.yaml secret-values.yaml -pcdc roll revproxy guppy +pcdc roll revproxy guppy pcdcanalysistools rm ./temp.yaml \ No newline at end of file diff --git a/tools/gearbox b/tools/gearbox new file mode 100755 index 000000000..fb7a48242 --- /dev/null +++ b/tools/gearbox @@ -0,0 +1,56 @@ +#!/bin/bash + + +# Define script names +CONNECT_SCRIPT="connect_to_db.sh" +ROLL_SCRIPT="roll.sh" +JOB_SCRIPT="job.sh" +POD_SCRIPT="connect_to_pod.sh" +RESTART_POD="restart_pod.sh" +LOGS="logs.sh" + + +# Check if command is provided as an argument +if [ $# -eq 0 ]; then + echo "Usage: $0 [...]" + exit 1 +fi + +# Give execute permission to specific .sh scripts in the specified directory +chmod +x "$(dirname "$0")"/*.sh + +# Extract the command +command="$1" +shift # Remove the first argument (command) + +# Check if the command is valid +case "$command" in + "psql") + # Run the connect_to_db.sh script with the remaining arguments + "$CONNECT_SCRIPT" "$@" + ;; + "roll") + # Run the roll.sh script with the remaining arguments + "$ROLL_SCRIPT" "gearbox" "$@" + ;; + "job") + # Run the job.sh script with the remaining arguments + "$JOB_SCRIPT" "$@" + ;; + "pod") + # Run the connect_to_pod.sh script with the remaining arguments + "$POD_SCRIPT" "$@" + ;; + "restart") + # Run the connect_to_pod.sh script with the remaining arguments + "$RESTART_POD" "$@" + ;; + "logs") + # Run the connect_to_pod.sh script with the remaining arguments + "$LOGS" "$@" + ;; + *) + echo "Invalid command: $command" + exit 1 + ;; +esac \ No newline at end of file diff --git a/tools/pcdc b/tools/pcdc index 6f1c1a615..c50bc90c6 100755 --- a/tools/pcdc +++ b/tools/pcdc @@ -33,7 +33,7 @@ case "$command" in ;; "roll") # Run the roll.sh script with the remaining arguments - "$ROLL_SCRIPT" "$@" + "$ROLL_SCRIPT" "pcdc" "$@" ;; "job") # Run the job.sh script with the remaining arguments diff --git a/tools/roll.sh b/tools/roll.sh index 1e0f60b7f..02fd97295 100755 --- a/tools/roll.sh +++ b/tools/roll.sh @@ -5,11 +5,13 @@ cd "$(dirname "$0")/../helm/gen3" || exit 1 || exit 1 rm ../../values.yaml +project="$1" +shift # Check if ../../secret-values.yaml exists if [ -f ../../secret-values.yaml ]; then - yq 
'. *= load("../../secret-values.yaml")' ../../default-values.yaml > ../../values.yaml + yq '. *= load("../../secret-values.yaml")' ../../$project-default-values.yaml > ../../values.yaml else - cp ../../default-values.yaml ../../values.yaml + cp ../../$project-default-values.yaml ../../values.yaml fi # Directory to store CA certificate ca_dir=../../CA @@ -40,6 +42,10 @@ if [ $# -gt 0 ]; then do # Delete the deployment corresponding to the service name kubectl delete deployment ${service_name}-deployment + if [ "$service_name" = "gearbox" ]; then + kubectl delete job gearbox-g3auto-patch + fi + done fi @@ -47,4 +53,4 @@ fi helm dependency update # Run helm upgrade --install command -helm upgrade --install pcdc . -f ../../values.yaml \ No newline at end of file +helm upgrade --install $project . -f ../../values.yaml \ No newline at end of file From 82ee6881c66f283333f65cd1fe48d1d8bf26b0c2 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 11 Jul 2024 18:40:44 -0700 Subject: [PATCH 014/126] add jwt-key pairs for services that need them and fixed some bugs --- helm/amanuensis/templates/jwt-keys.yaml | 12 +-- helm/amanuensis/values.yaml | 3 + helm/common/templates/_jwt_key_pairs.tpl | 100 ++++++++++++++++++ .../gearbox-middleware/templates/_helpers.tpl | 13 +-- .../templates/deployment.yaml | 6 +- .../templates/gearbox-create-public-key.yaml | 68 ------------ .../templates/gearbox-middleware-secret.yaml | 10 -- .../templates/jwt-keys.yaml | 5 + helm/gearbox-middleware/values.yaml | 2 +- .../templates/create-gearbox-config.yaml | 2 +- helm/gearbox/values.yaml | 2 +- helm/guppy/templates/deployment.yaml | 2 + helm/guppy/values.yaml | 10 ++ .../templates/deployment.yaml | 3 + .../pcdcanalysistools/templates/jwt-keys.yaml | 12 +-- helm/wts/templates/wts-oidc.yaml | 2 +- pcdc-default-values.yaml | 96 ++++++++++++++++- tools/gearbox | 2 +- tools/job.sh | 5 +- tools/pcdc | 2 +- 20 files changed, 241 insertions(+), 116 deletions(-) create mode 100644 helm/common/templates/_jwt_key_pairs.tpl delete mode 100644 helm/gearbox-middleware/templates/gearbox-create-public-key.yaml create mode 100644 helm/gearbox-middleware/templates/jwt-keys.yaml diff --git a/helm/amanuensis/templates/jwt-keys.yaml b/helm/amanuensis/templates/jwt-keys.yaml index d902c85dc..322abbf5b 100644 --- a/helm/amanuensis/templates/jwt-keys.yaml +++ b/helm/amanuensis/templates/jwt-keys.yaml @@ -1,7 +1,5 @@ -apiVersion: v1 -kind: Secret -metadata: - name: amanuensis-jwt-keys -type: Opaque -data: - jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . 
}} \ No newline at end of file +{{include "common.jwt-key-pair-secret" .}} +--- +{{include "common.jwt_public_key_setup_sa" .}} +--- +{{include "common.create_public_key_job" .}} \ No newline at end of file diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index 3a916c739..aeb569296 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -225,6 +225,9 @@ volumes: - name: amanuensis-jwt-keys secret: secretName: "amanuensis-jwt-keys" + items: + - key: jwt_private_key.pem + path: jwt_private_key.pem #need to add potentially - name: yaml-merge configMap: diff --git a/helm/common/templates/_jwt_key_pairs.tpl b/helm/common/templates/_jwt_key_pairs.tpl new file mode 100644 index 000000000..a6fe5ebd7 --- /dev/null +++ b/helm/common/templates/_jwt_key_pairs.tpl @@ -0,0 +1,100 @@ +{{- define "common.jwt_public_key_setup_sa" -}} + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-jwt-public-key-patch-sa + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Chart.Name }}-jwt-public-key-patch-role +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["*"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Chart.Name }}-jwt-public-key-patch-rolebinding +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-jwt-public-key-patch-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ .Chart.Name }}-jwt-public-key-patch-role + apiGroup: rbac.authorization.k8s.io + + +{{- end }} + +--- + +{{- define "common.create_public_key_job" -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Chart.Name }}-create-public-key + labels: + app: gen3job +spec: + template: + spec: + serviceAccountName: {{ .Chart.Name }}-jwt-public-key-patch-sa + containers: + - name: public-key-gen + image: bitnami/kubectl:latest + env: + - name: PRIVATE_KEY_PEM + valueFrom: + secretKeyRef: + name: {{ .Chart.Name }}-jwt-keys + key: jwt_private_key.pem + optional: false + - name: SERVICE + value: {{ .Chart.Name }} + command: ["/bin/sh", "-c"] + args: + - | + set -e + + echo "SERVICE=$SERVICE" + + # Read the private key from the secret + private_key=$(kubectl get secret $SERVICE-jwt-keys -o jsonpath='{.data.jwt_private_key\.pem}' | base64 --decode) + + # Create a temporary file for the private key + echo "${private_key}" > /tmp/private_key.pem + + # Generate the public key from the private key + openssl rsa -in /tmp/private_key.pem -pubout -out /tmp/public_key.pem + + # Base64 encode the public key + public_key=$(base64 -w 0 /tmp/public_key.pem) + + # Update the secret with the public key + kubectl patch secret $SERVICE-jwt-keys -p="{\"data\": {\"jwt_public_key.pem\": \"${public_key}\"}}" + + restartPolicy: OnFailure +{{- end }} +--- + +{{/* +Create k8s secrets for creating jwt key pairs +*/}} +# JWT key Secrets +{{- define "common.jwt-key-pair-secret" -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.Chart.Name }}-jwt-keys +type: Opaque +data: + jwt_private_key.pem: {{ genPrivateKey "rsa" | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/_helpers.tpl b/helm/gearbox-middleware/templates/_helpers.tpl index b506cb67c..7704d6b61 100644 --- a/helm/gearbox-middleware/templates/_helpers.tpl +++ b/helm/gearbox-middleware/templates/_helpers.tpl @@ -65,15 +65,4 @@ Create the name of the service account to use {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} -{{- end }} - - -{{- 
define "getOrCreateRSAPrivateKey" -}} -{{- $secretName := "gearbox-middleware-jwt-keys-g3auto" }} -{{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace $secretName) }} -{{- if $existingSecret }} -{{- index $existingSecret.data "jwt_private_key.pem" }} -{{- else }} -{{- genPrivateKey "rsa" | b64enc }} -{{- end }} -{{- end -}} \ No newline at end of file +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml index 9e7b27172..eca422436 100644 --- a/helm/gearbox-middleware/templates/deployment.yaml +++ b/helm/gearbox-middleware/templates/deployment.yaml @@ -67,8 +67,10 @@ spec: {{- toYaml .Values.env | nindent 12 }} initContainers: - name: wait-for-gearbox - image: busybox:1.31.0 - command: ["/bin/sh", "-c", "until [ $(wget -q --spider http://gearbox-service/_status && echo '200' || echo '500') -eq 200 ]; do echo 'Waiting for gearbox to be ready...'; sleep 2; done"] {{- with .Values.nodeSelector }} + image: curlimages/curl:latest + command: ["/bin/sh","-c"] + args: ["while [ $(curl -sw '%{http_code}' http://gearbox-service/_status -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for gearbox...'; done"] + {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/helm/gearbox-middleware/templates/gearbox-create-public-key.yaml b/helm/gearbox-middleware/templates/gearbox-create-public-key.yaml deleted file mode 100644 index ef285b4fe..000000000 --- a/helm/gearbox-middleware/templates/gearbox-create-public-key.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Chart.Name }}-jwt-patch-sa ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ .Chart.Name }}-jwt-patch-role -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["*"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ .Chart.Name }}-jwt-patch-rolebinding -subjects: -- kind: ServiceAccount - name: {{ .Chart.Name }}-jwt-patch-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: {{ .Chart.Name }}-jwt-patch-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: gearbox-middleware-create-public-key - labels: - app: gen3job -spec: - template: - spec: - serviceAccountName: {{ .Chart.Name }}-jwt-patch-sa - containers: - - name: public-keygen - image: quay.io/cdis/awshelper:master - env: - - name: PRIVATE_KEY_PEM - valueFrom: - secretKeyRef: - name: gearbox-middleware-jwt-keys-g3auto - key: jwt_private_key.pem - optional: false - command: ["/bin/sh", "-c"] - args: - - | - set -e - - # Read the private key from the secret - private_key=$(kubectl get secret gearbox-middleware-jwt-keys-g3auto -o jsonpath='{.data.jwt_private_key\.pem}' | base64 --decode) - - # Create a temporary file for the private key - echo "${private_key}" > /tmp/private_key.pem - - # Generate the public key from the private key - openssl rsa -in /tmp/private_key.pem -pubout -out /tmp/public_key.pem - - # Base64 encode the public key - public_key=$(base64 -w 0 /tmp/public_key.pem) - - # Update the secret with the public key - kubectl patch secret gearbox-middleware-jwt-keys-g3auto -p="{\"data\": {\"jwt_public_key.pem\": \"${public_key}\"}}" - - restartPolicy: OnFailure \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml b/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml index 
3442aea62..f986bd807 100644 --- a/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml +++ b/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml @@ -1,13 +1,3 @@ -apiVersion: v1 -kind: Secret -metadata: - name: gearbox-middleware-jwt-keys-g3auto -type: Opaque -data: - jwt_private_key.pem: {{ include "getOrCreateRSAPrivateKey" . }} - ---- - apiVersion: v1 kind: Secret metadata: diff --git a/helm/gearbox-middleware/templates/jwt-keys.yaml b/helm/gearbox-middleware/templates/jwt-keys.yaml new file mode 100644 index 000000000..322abbf5b --- /dev/null +++ b/helm/gearbox-middleware/templates/jwt-keys.yaml @@ -0,0 +1,5 @@ +{{include "common.jwt-key-pair-secret" .}} +--- +{{include "common.jwt_public_key_setup_sa" .}} +--- +{{include "common.create_public_key_job" .}} \ No newline at end of file diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index fe428edb9..134ff94bc 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -131,7 +131,7 @@ env: volumes: - name: gearbox-middleware-jwt-keys secret: - secretName: "gearbox-middleware-jwt-keys-g3auto" + secretName: "gearbox-middleware-jwt-keys" items: - key: jwt_private_key.pem path: jwt_private_key.pem diff --git a/helm/gearbox/templates/create-gearbox-config.yaml b/helm/gearbox/templates/create-gearbox-config.yaml index 63d5892e4..19f3a9864 100644 --- a/helm/gearbox/templates/create-gearbox-config.yaml +++ b/helm/gearbox/templates/create-gearbox-config.yaml @@ -40,7 +40,7 @@ spec: serviceAccountName: {{ .Chart.Name }}-secret-patch-sa containers: - name: gearbox-g3auto-patch - image: quay.io/cdis/awshelper:master + image: bitnami/kubectl:latest command: ["/bin/sh", "-c"] args: - | diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index e7e5c1a03..d993b9dd5 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -181,7 +181,7 @@ env: volumes: - name: gearbox-middleware-jwt-keys secret: - secretName: "gearbox-middleware-jwt-keys-g3auto" + secretName: "gearbox-middleware-jwt-keys" items: - key: jwt_public_key.pem path: jwt_public_key.pem diff --git a/helm/guppy/templates/deployment.yaml b/helm/guppy/templates/deployment.yaml index 552f90636..f3208ee82 100644 --- a/helm/guppy/templates/deployment.yaml +++ b/helm/guppy/templates/deployment.yaml @@ -74,6 +74,8 @@ spec: value: {{ .Values.global.tierAccessLevel | quote }} - name: TIER_ACCESS_LIMIT value: {{ .Values.global.tierAccessLimit | quote }} + - name: PUBLIC_KEY_PATH + value: /guppy/jwt_public_key.pem {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . 
| nindent 10 }} diff --git a/helm/guppy/values.yaml b/helm/guppy/values.yaml index 054e4734c..a6a5a5996 100644 --- a/helm/guppy/values.yaml +++ b/helm/guppy/values.yaml @@ -128,6 +128,12 @@ volumes: items: - key: guppy_config.json path: guppy_config.json + - name: pcdcanalysistools-jwt-keys + secret: + secretName: "pcdcanalysistools-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem # -- (bool) Automount the default service account token automountServiceAccountToken: false @@ -154,6 +160,10 @@ volumeMounts: readOnly: true mountPath: /guppy/guppy_config.json subPath: guppy_config.json + - name: "pcdcanalysistools-jwt-keys" + readOnly: true + mountPath: "/guppy/jwt_public_key.pem" + subPath: "jwt_public_key.pem" # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index eef69e6a9..e18375a83 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -33,6 +33,9 @@ spec: - name: pcdcanalysistools-jwt-keys secret: secretName: "pcdcanalysistools-jwt-keys" + items: + - key: jwt_private_key.pem + path: jwt_private_key.pem # - name: config-helper # secret: # secretName: "pcdcanalysistools-secret" diff --git a/helm/pcdcanalysistools/templates/jwt-keys.yaml b/helm/pcdcanalysistools/templates/jwt-keys.yaml index 779761090..322abbf5b 100644 --- a/helm/pcdcanalysistools/templates/jwt-keys.yaml +++ b/helm/pcdcanalysistools/templates/jwt-keys.yaml @@ -1,7 +1,5 @@ -apiVersion: v1 -kind: Secret -metadata: - name: pcdcanalysistools-jwt-keys -type: Opaque -data: - jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . }} \ No newline at end of file +{{include "common.jwt-key-pair-secret" .}} +--- +{{include "common.jwt_public_key_setup_sa" .}} +--- +{{include "common.create_public_key_job" .}} \ No newline at end of file diff --git a/helm/wts/templates/wts-oidc.yaml b/helm/wts/templates/wts-oidc.yaml index 769a3c47d..e04d22008 100644 --- a/helm/wts/templates/wts-oidc.yaml +++ b/helm/wts/templates/wts-oidc.yaml @@ -24,7 +24,7 @@ spec: containers: - name: fence-client # TODO: Make this configurable - image: "quay.io/cdis/fence:master" + image: "quay.io/pcdc/fence:helm-test" imagePullPolicy: {{ .Values.image.pullPolicy }} # TODO: ADD RESOURCES # resources: diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 5897815b3..43e345bc1 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -64,7 +64,99 @@ amanuensis: repository: quay.io/pcdc/amanuensis tag: "2.16.1" -fence: +fence: + volumes: + - name: old-config-volume + secret: + secretName: "fence-secret" + - name: json-secret-volume + secret: + secretName: "fence-json-secret" + optional: true + - name: creds-volume + secret: + secretName: "fence-creds" + - name: config-helper + configMap: + name: config-helper + optional: true + - name: logo-volume + configMap: + name: "logo-config" + - name: config-volume + secret: + secretName: "fence-config" + - name: fence-google-app-creds-secret-volume + secret: + secretName: "fence-google-app-creds-secret" + - name: fence-google-storage-creds-secret-volume + secret: + secretName: "fence-google-storage-creds-secret" + - name: fence-jwt-keys + secret: + secretName: "fence-jwt-keys" + - name: privacy-policy + configMap: + name: "privacy-policy" + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + optional: true + - name: amanuensis-jwt-keys + secret: + 
secretName: "amanuensis-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + volumeMounts: + - name: "old-config-volume" + readOnly: true + mountPath: "/var/www/fence/local_settings.py" + subPath: local_settings.py + - name: "json-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_credentials.json" + subPath: fence_credentials.json + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/fence/config_helper.py" + subPath: config_helper.py + - name: "logo-volume" + readOnly: true + mountPath: "/fence/fence/static/img/logo.svg" + subPath: "logo.svg" + - name: "privacy-policy" + readOnly: true + mountPath: "/fence/fence/static/privacy_policy.md" + subPath: "privacy_policy.md" + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_private_key.pem" + subPath: "jwt_private_key.pem" + - name: "amanuensis-jwt-keys" + readOnly: true + mountPath: "/amanuensis/jwt_public_key.pem" + subPath: "jwt_public_key.pem" FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true @@ -1036,7 +1128,7 @@ fence: privilege: [create, read, update, delete, upload, read-storage] - auth_id: jnkns privilege: [create, read, update, delete, upload, read-storage] - graglia01@gmail.com: + test@example.com: admin: true policies: - data_upload diff --git a/tools/gearbox b/tools/gearbox index fb7a48242..7a548c5a7 100755 --- a/tools/gearbox +++ b/tools/gearbox @@ -35,7 +35,7 @@ case "$command" in ;; "job") # Run the job.sh script with the remaining arguments - "$JOB_SCRIPT" "$@" + "$JOB_SCRIPT" "gearbox" "$@" ;; "pod") # Run the connect_to_pod.sh script with the remaining arguments diff --git a/tools/job.sh b/tools/job.sh index 36feaba3c..7207e1ac8 100755 --- a/tools/job.sh +++ b/tools/job.sh @@ -7,7 +7,8 @@ if [ $# -eq 0 ]; then fi # Extract the job name -job_name="$1" +project="$1" +job_name="$2" # Delete the specified Job kubectl delete job "$job_name" @@ -19,4 +20,4 @@ if [ $? 
-ne 0 ]; then fi # Run roll.sh script -roll.sh \ No newline at end of file +$project roll \ No newline at end of file diff --git a/tools/pcdc b/tools/pcdc index c50bc90c6..02638c090 100755 --- a/tools/pcdc +++ b/tools/pcdc @@ -37,7 +37,7 @@ case "$command" in ;; "job") # Run the job.sh script with the remaining arguments - "$JOB_SCRIPT" "$@" + "$JOB_SCRIPT" "pcdc" "$@" ;; "pod") # Run the connect_to_pod.sh script with the remaining arguments From 17cc8dc6b4f2398638190e969994b83b8b9efe21 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 26 Aug 2024 14:49:21 -0700 Subject: [PATCH 015/126] update common chart --- helm/amanuensis/Chart.yaml | 2 +- helm/gearbox-middleware/Chart.yaml | 2 +- helm/gearbox/Chart.yaml | 2 +- helm/pcdcanalysistools/Chart.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index 1ee0699cc..fbe3a4124 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.7 + version: 0.1.11 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml index 3264d7d74..5870e9fd4 100644 --- a/helm/gearbox-middleware/Chart.yaml +++ b/helm/gearbox-middleware/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.7 + version: 0.1.11 repository: file://../common - name: gearbox version: 0.1.0 diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml index 6038c80b3..379415e32 100644 --- a/helm/gearbox/Chart.yaml +++ b/helm/gearbox/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.7 + version: 0.1.11 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index be33cae3e..0eb76c931 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -26,5 +26,5 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.7 + version: 0.1.11 repository: file://../common \ No newline at end of file From 21a0e5bf221dc08ca59ea858592d52286edd1c21 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 26 Aug 2024 14:58:52 -0700 Subject: [PATCH 016/126] update amanuensis chart --- helm/amanuensis/Chart.yaml | 2 +- helm/gen3/Chart.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index fbe3a4124..68fc04d6e 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.1.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 6d49fb7a7..7b0013973 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -100,7 +100,7 @@ dependencies: repository: "file://../pcdcanalysistools" condition: pcdcanalysistools.enabled - name: amanuensis - version: "0.1.0" + version: "0.1.1" repository: "file://../amanuensis" condition: amanuensis.enabled - name: gearbox @@ -137,7 +137,7 @@ dependencies: # Application charts are a collection of templates that can be packaged into versioned archives # to be deployed. # -# Library charts provide useful utilities or functions for the chart developer. They're included as +# Library charts pxrovide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. type: application @@ -145,7 +145,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.36 +version: 0.1.37 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to From adac68728ff9d6de7899b00bad7b4412c89f8e43 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 26 Aug 2024 15:03:45 -0700 Subject: [PATCH 017/126] more chart updates --- helm/ambassador/Chart.yaml | 4 ++-- helm/gen3/Chart.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/helm/ambassador/Chart.yaml b/helm/ambassador/Chart.yaml index b525c4d06..8ee2878ec 100644 --- a/helm/ambassador/Chart.yaml +++ b/helm/ambassador/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.12 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to @@ -25,5 +25,5 @@ appVersion: "1.4.2" dependencies: - name: common - version: 0.1.10 + version: 0.1.11 repository: file://../common diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 7b0013973..b81716bb1 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -5,7 +5,7 @@ description: Helm chart to deploy Gen3 Data Commons # Dependencies dependencies: - name: ambassador - version: 0.1.11 + version: 0.1.12 repository: "file://../ambassador" condition: ambassador.enabled - name: arborist From 9dc4de40ed0e7983be3a55679aaba78ed4a31a19 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 26 Aug 2024 15:17:43 -0700 Subject: [PATCH 018/126] Revert "more chart updates" This reverts commit adac68728ff9d6de7899b00bad7b4412c89f8e43. --- helm/ambassador/Chart.yaml | 4 ++-- helm/gen3/Chart.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/helm/ambassador/Chart.yaml b/helm/ambassador/Chart.yaml index 8ee2878ec..b525c4d06 100644 --- a/helm/ambassador/Chart.yaml +++ b/helm/ambassador/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.12 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to @@ -25,5 +25,5 @@ appVersion: "1.4.2" dependencies: - name: common - version: 0.1.11 + version: 0.1.10 repository: file://../common diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index b81716bb1..7b0013973 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -5,7 +5,7 @@ description: Helm chart to deploy Gen3 Data Commons # Dependencies dependencies: - name: ambassador - version: 0.1.12 + version: 0.1.11 repository: "file://../ambassador" condition: ambassador.enabled - name: arborist From 98dfb6b80bdf0b5c6ae6c4bb89285367562863b5 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 <80228075+paulmurdoch19@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:01:42 -0700 Subject: [PATCH 019/126] Update release.yaml --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f4391ce5e..25eeb0058 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -3,7 +3,7 @@ name: Release Charts on: push: branches: - - master + - GEAR-427 jobs: release: From 09976848c56925f29a57533d3a5bb7fea3d45362 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 27 Aug 2024 14:11:08 -0700 Subject: [PATCH 020/126] remove wildcard --- helm/common/templates/_db_setup_job.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 9ea67dbea..9e19d981c 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -13,7 +13,7 @@ metadata: rules: - apiGroups: [""] resources: ["secrets"] - verbs: ["*"] + verbs: ["get"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding From cfb3261004c08c1c66a4ba3f000bd7cdd2e92cfc Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 27 Aug 2024 16:02:21 -0700 Subject: [PATCH 021/126] force services to be added to release --- helm/amanuensis/Chart.yaml | 2 +- helm/ambassador/Chart.yaml | 2 +- helm/arborist/Chart.yaml | 2 +- helm/argo-wrapper/Chart.yaml | 2 +- helm/audit/Chart.yaml | 2 +- helm/aws-es-proxy/Chart.yaml | 2 +- helm/dicom-server/Chart.yaml | 2 +- helm/dicom-viewer/Chart.yaml | 2 +- helm/etl/Chart.yaml | 2 +- helm/fence/Chart.yaml | 2 +- helm/frontend-framework/Chart.yaml | 2 +- helm/gearbox-middleware/Chart.yaml | 2 +- helm/gearbox/Chart.yaml | 2 +- helm/gen3/Chart.yaml | 2 +- helm/guppy/Chart.yaml | 2 +- helm/hatchery/Chart.yaml | 2 +- helm/indexd/Chart.yaml | 2 +- helm/manifestservice/Chart.yaml | 2 +- helm/metadata/Chart.yaml | 2 +- helm/neuvector/Chart.yaml | 2 +- helm/pcdcanalysistools/Chart.yaml | 2 +- helm/peregrine/Chart.yaml | 2 +- helm/pidgin/Chart.yaml | 2 +- helm/portal/Chart.yaml | 2 +- helm/requestor/Chart.yaml | 2 +- helm/revproxy/Chart.yaml | 2 +- helm/sheepdog/Chart.yaml | 2 +- helm/sower/Chart.yaml | 2 +- helm/ssjdispatcher/Chart.yaml | 2 +- helm/wts/Chart.yaml | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index 68fc04d6e..bbe800c52 100644 --- 
a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/ambassador/Chart.yaml b/helm/ambassador/Chart.yaml index b525c4d06..3c6459993 100644 --- a/helm/ambassador/Chart.yaml +++ b/helm/ambassador/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for deploying ambassador for gen3 # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/arborist/Chart.yaml b/helm/arborist/Chart.yaml index 396968729..3bac0282d 100644 --- a/helm/arborist/Chart.yaml +++ b/helm/arborist/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 arborist # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/argo-wrapper/Chart.yaml b/helm/argo-wrapper/Chart.yaml index 57d201247..44fab2c42 100644 --- a/helm/argo-wrapper/Chart.yaml +++ b/helm/argo-wrapper/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 Argo Wrapper Service # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/audit/Chart.yaml b/helm/audit/Chart.yaml index 2295c6023..dcdea70af 100644 --- a/helm/audit/Chart.yaml +++ b/helm/audit/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
diff --git a/helm/aws-es-proxy/Chart.yaml b/helm/aws-es-proxy/Chart.yaml index 7fea05b38..e05fb89c8 100644 --- a/helm/aws-es-proxy/Chart.yaml +++ b/helm/aws-es-proxy/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for AWS ES Proxy Service for gen3 # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/dicom-server/Chart.yaml b/helm/dicom-server/Chart.yaml index 4741141b0..b239e4bbc 100644 --- a/helm/dicom-server/Chart.yaml +++ b/helm/dicom-server/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 Dicom Server # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/dicom-viewer/Chart.yaml b/helm/dicom-viewer/Chart.yaml index 4835cfea6..b90186836 100644 --- a/helm/dicom-viewer/Chart.yaml +++ b/helm/dicom-viewer/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 Dicom Viewer # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/etl/Chart.yaml b/helm/etl/Chart.yaml index c5b08a7b0..f7fc0be5e 100644 --- a/helm/etl/Chart.yaml +++ b/helm/etl/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 etl # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/fence/Chart.yaml b/helm/fence/Chart.yaml index a8e6edc3e..5c0ce03e0 100644 --- a/helm/fence/Chart.yaml +++ b/helm/fence/Chart.yaml @@ -23,7 +23,7 @@ version: 0.1.18 appVersion: "master" dependencies: -- name: common +- name: common version: 0.1.10 repository: file://../common - name: postgresql diff --git a/helm/frontend-framework/Chart.yaml b/helm/frontend-framework/Chart.yaml index a86f9eb13..8cf5997e7 100644 --- a/helm/frontend-framework/Chart.yaml +++ b/helm/frontend-framework/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for the gen3 frontend framework # Library charts provide useful utilities or functions for the chart developer. 
They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and, therefore, cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml index 5870e9fd4..1e0d72388 100644 --- a/helm/gearbox-middleware/Chart.yaml +++ b/helm/gearbox-middleware/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml index 379415e32..04441d00a 100644 --- a/helm/gearbox/Chart.yaml +++ b/helm/gearbox/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 7b0013973..e6da5fdb3 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -118,7 +118,7 @@ dependencies: repository: "https://helm.elastic.co" condition: elasticsearch.enabled - name: postgresql - version: 11.9.13 + version: 11.9.13 repository: "https://charts.bitnami.com/bitnami" condition: global.dev diff --git a/helm/guppy/Chart.yaml b/helm/guppy/Chart.yaml index 12fd56edd..8a6074986 100644 --- a/helm/guppy/Chart.yaml +++ b/helm/guppy/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/hatchery/Chart.yaml b/helm/hatchery/Chart.yaml index 8e4c05c16..acd4e4233 100644 --- a/helm/hatchery/Chart.yaml +++ b/helm/hatchery/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.9 +version: 0.1.9 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/indexd/Chart.yaml b/helm/indexd/Chart.yaml index 19e781264..bd8954327 100644 --- a/helm/indexd/Chart.yaml +++ b/helm/indexd/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/manifestservice/Chart.yaml b/helm/manifestservice/Chart.yaml index 382e01657..59584ffa7 100644 --- a/helm/manifestservice/Chart.yaml +++ b/helm/manifestservice/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/metadata/Chart.yaml b/helm/metadata/Chart.yaml index d38d06e54..2591c8c83 100644 --- a/helm/metadata/Chart.yaml +++ b/helm/metadata/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.12 +version: 0.1.12 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/neuvector/Chart.yaml b/helm/neuvector/Chart.yaml index 46be470be..a47bd7fc6 100644 --- a/helm/neuvector/Chart.yaml +++ b/helm/neuvector/Chart.yaml @@ -14,7 +14,7 @@ description: NeuVector Kubernetes Security Policy templates to protect Gen3 # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 0eb76c931..2ffede7e8 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.1.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/peregrine/Chart.yaml b/helm/peregrine/Chart.yaml index 10f79b5d2..575bea446 100644 --- a/helm/peregrine/Chart.yaml +++ b/helm/peregrine/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.13 +version: 0.1.13 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/pidgin/Chart.yaml b/helm/pidgin/Chart.yaml index be9c20863..fe85f7b51 100644 --- a/helm/pidgin/Chart.yaml +++ b/helm/pidgin/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.10 +version: 0.1.10 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/portal/Chart.yaml b/helm/portal/Chart.yaml index 6141c5e5f..4fe8f4195 100644 --- a/helm/portal/Chart.yaml +++ b/helm/portal/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.15 +version: 0.1.15 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/requestor/Chart.yaml b/helm/requestor/Chart.yaml index e0c06a887..a7a40762b 100644 --- a/helm/requestor/Chart.yaml +++ b/helm/requestor/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/revproxy/Chart.yaml b/helm/revproxy/Chart.yaml index 37c503869..455ee0cef 100644 --- a/helm/revproxy/Chart.yaml +++ b/helm/revproxy/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/sheepdog/Chart.yaml b/helm/sheepdog/Chart.yaml index ea4bcc323..18a21f241 100644 --- a/helm/sheepdog/Chart.yaml +++ b/helm/sheepdog/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/sower/Chart.yaml b/helm/sower/Chart.yaml index a48e3910a..34824ceb4 100644 --- a/helm/sower/Chart.yaml +++ b/helm/sower/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/ssjdispatcher/Chart.yaml b/helm/ssjdispatcher/Chart.yaml index ac6682874..5dac10465 100644 --- a/helm/ssjdispatcher/Chart.yaml +++ b/helm/ssjdispatcher/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.9 +version: 0.1.9 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/wts/Chart.yaml b/helm/wts/Chart.yaml index 6d9533e47..ecfc98815 100644 --- a/helm/wts/Chart.yaml +++ b/helm/wts/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.13 +version: 0.1.13 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to From 1ab55e118bfb90a79457a559044285c498d68cd2 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 27 Aug 2024 16:19:27 -0700 Subject: [PATCH 022/126] Revert "force services to be added to release" This reverts commit cfb3261004c08c1c66a4ba3f000bd7cdd2e92cfc. 
--- helm/amanuensis/Chart.yaml | 2 +- helm/ambassador/Chart.yaml | 2 +- helm/arborist/Chart.yaml | 2 +- helm/argo-wrapper/Chart.yaml | 2 +- helm/audit/Chart.yaml | 2 +- helm/aws-es-proxy/Chart.yaml | 2 +- helm/dicom-server/Chart.yaml | 2 +- helm/dicom-viewer/Chart.yaml | 2 +- helm/etl/Chart.yaml | 2 +- helm/fence/Chart.yaml | 2 +- helm/frontend-framework/Chart.yaml | 2 +- helm/gearbox-middleware/Chart.yaml | 2 +- helm/gearbox/Chart.yaml | 2 +- helm/gen3/Chart.yaml | 2 +- helm/guppy/Chart.yaml | 2 +- helm/hatchery/Chart.yaml | 2 +- helm/indexd/Chart.yaml | 2 +- helm/manifestservice/Chart.yaml | 2 +- helm/metadata/Chart.yaml | 2 +- helm/neuvector/Chart.yaml | 2 +- helm/pcdcanalysistools/Chart.yaml | 2 +- helm/peregrine/Chart.yaml | 2 +- helm/pidgin/Chart.yaml | 2 +- helm/portal/Chart.yaml | 2 +- helm/requestor/Chart.yaml | 2 +- helm/revproxy/Chart.yaml | 2 +- helm/sheepdog/Chart.yaml | 2 +- helm/sower/Chart.yaml | 2 +- helm/ssjdispatcher/Chart.yaml | 2 +- helm/wts/Chart.yaml | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index bbe800c52..68fc04d6e 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/ambassador/Chart.yaml b/helm/ambassador/Chart.yaml index 3c6459993..b525c4d06 100644 --- a/helm/ambassador/Chart.yaml +++ b/helm/ambassador/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for deploying ambassador for gen3 # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/arborist/Chart.yaml b/helm/arborist/Chart.yaml index 3bac0282d..396968729 100644 --- a/helm/arborist/Chart.yaml +++ b/helm/arborist/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 arborist # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/argo-wrapper/Chart.yaml b/helm/argo-wrapper/Chart.yaml index 44fab2c42..57d201247 100644 --- a/helm/argo-wrapper/Chart.yaml +++ b/helm/argo-wrapper/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 Argo Wrapper Service # Library charts provide useful utilities or functions for the chart developer. 
They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/audit/Chart.yaml b/helm/audit/Chart.yaml index dcdea70af..2295c6023 100644 --- a/helm/audit/Chart.yaml +++ b/helm/audit/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/aws-es-proxy/Chart.yaml b/helm/aws-es-proxy/Chart.yaml index e05fb89c8..7fea05b38 100644 --- a/helm/aws-es-proxy/Chart.yaml +++ b/helm/aws-es-proxy/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for AWS ES Proxy Service for gen3 # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/dicom-server/Chart.yaml b/helm/dicom-server/Chart.yaml index b239e4bbc..4741141b0 100644 --- a/helm/dicom-server/Chart.yaml +++ b/helm/dicom-server/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 Dicom Server # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/dicom-viewer/Chart.yaml b/helm/dicom-viewer/Chart.yaml index b90186836..4835cfea6 100644 --- a/helm/dicom-viewer/Chart.yaml +++ b/helm/dicom-viewer/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 Dicom Viewer # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
diff --git a/helm/etl/Chart.yaml b/helm/etl/Chart.yaml index f7fc0be5e..c5b08a7b0 100644 --- a/helm/etl/Chart.yaml +++ b/helm/etl/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for gen3 etl # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/fence/Chart.yaml b/helm/fence/Chart.yaml index 5c0ce03e0..a8e6edc3e 100644 --- a/helm/fence/Chart.yaml +++ b/helm/fence/Chart.yaml @@ -23,7 +23,7 @@ version: 0.1.18 appVersion: "master" dependencies: -- name: common +- name: common version: 0.1.10 repository: file://../common - name: postgresql diff --git a/helm/frontend-framework/Chart.yaml b/helm/frontend-framework/Chart.yaml index 8cf5997e7..a86f9eb13 100644 --- a/helm/frontend-framework/Chart.yaml +++ b/helm/frontend-framework/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for the gen3 frontend framework # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and, therefore, cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml index 1e0d72388..5870e9fd4 100644 --- a/helm/gearbox-middleware/Chart.yaml +++ b/helm/gearbox-middleware/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml index 04441d00a..379415e32 100644 --- a/helm/gearbox/Chart.yaml +++ b/helm/gearbox/Chart.yaml @@ -10,7 +10,7 @@ description: A Helm chart for Kubernetes # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index e6da5fdb3..7b0013973 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -118,7 +118,7 @@ dependencies: repository: "https://helm.elastic.co" condition: elasticsearch.enabled - name: postgresql - version: 11.9.13 + version: 11.9.13 repository: "https://charts.bitnami.com/bitnami" condition: global.dev diff --git a/helm/guppy/Chart.yaml b/helm/guppy/Chart.yaml index 8a6074986..12fd56edd 100644 --- a/helm/guppy/Chart.yaml +++ b/helm/guppy/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/hatchery/Chart.yaml b/helm/hatchery/Chart.yaml index acd4e4233..8e4c05c16 100644 --- a/helm/hatchery/Chart.yaml +++ b/helm/hatchery/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.9 +version: 0.1.9 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/indexd/Chart.yaml b/helm/indexd/Chart.yaml index bd8954327..19e781264 100644 --- a/helm/indexd/Chart.yaml +++ b/helm/indexd/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/manifestservice/Chart.yaml b/helm/manifestservice/Chart.yaml index 59584ffa7..382e01657 100644 --- a/helm/manifestservice/Chart.yaml +++ b/helm/manifestservice/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/metadata/Chart.yaml b/helm/metadata/Chart.yaml index 2591c8c83..d38d06e54 100644 --- a/helm/metadata/Chart.yaml +++ b/helm/metadata/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.12 +version: 0.1.12 # This is the version number of the application being deployed. 
This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/neuvector/Chart.yaml b/helm/neuvector/Chart.yaml index a47bd7fc6..46be470be 100644 --- a/helm/neuvector/Chart.yaml +++ b/helm/neuvector/Chart.yaml @@ -14,7 +14,7 @@ description: NeuVector Kubernetes Security Policy templates to protect Gen3 # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application +type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 2ffede7e8..0eb76c931 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.1.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/peregrine/Chart.yaml b/helm/peregrine/Chart.yaml index 575bea446..10f79b5d2 100644 --- a/helm/peregrine/Chart.yaml +++ b/helm/peregrine/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.13 +version: 0.1.13 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/pidgin/Chart.yaml b/helm/pidgin/Chart.yaml index fe85f7b51..be9c20863 100644 --- a/helm/pidgin/Chart.yaml +++ b/helm/pidgin/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.10 +version: 0.1.10 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/portal/Chart.yaml b/helm/portal/Chart.yaml index 4fe8f4195..6141c5e5f 100644 --- a/helm/portal/Chart.yaml +++ b/helm/portal/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.15 +version: 0.1.15 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/requestor/Chart.yaml b/helm/requestor/Chart.yaml index a7a40762b..e0c06a887 100644 --- a/helm/requestor/Chart.yaml +++ b/helm/requestor/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/revproxy/Chart.yaml b/helm/revproxy/Chart.yaml index 455ee0cef..37c503869 100644 --- a/helm/revproxy/Chart.yaml +++ b/helm/revproxy/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/sheepdog/Chart.yaml b/helm/sheepdog/Chart.yaml index 18a21f241..ea4bcc323 100644 --- a/helm/sheepdog/Chart.yaml +++ b/helm/sheepdog/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.14 +version: 0.1.14 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/sower/Chart.yaml b/helm/sower/Chart.yaml index 34824ceb4..a48e3910a 100644 --- a/helm/sower/Chart.yaml +++ b/helm/sower/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.11 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/ssjdispatcher/Chart.yaml b/helm/ssjdispatcher/Chart.yaml index 5dac10465..ac6682874 100644 --- a/helm/ssjdispatcher/Chart.yaml +++ b/helm/ssjdispatcher/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.9 +version: 0.1.9 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/wts/Chart.yaml b/helm/wts/Chart.yaml index ecfc98815..6d9533e47 100644 --- a/helm/wts/Chart.yaml +++ b/helm/wts/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.13
+version: 0.1.13 
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

From 5e4095f61d82bdbb367b7dd1de0cca1abcd96618 Mon Sep 17 00:00:00 2001
From: paulmurdoch19
Date: Tue, 27 Aug 2024 16:26:41 -0700
Subject: [PATCH 023/126] dont create release on this branch

---
 .github/workflows/release.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 25eeb0058..f4391ce5e 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -3,7 +3,7 @@ name: Release Charts
 on:
   push:
     branches:
-      - GEAR-427
+      - master
 
 jobs:
   release:

From 0506dc8454a80b774b4a6b69e42813f1d9f86c45 Mon Sep 17 00:00:00 2001
From: paulmurdoch19
Date: Tue, 4 Feb 2025 10:05:05 -0800
Subject: [PATCH 024/126] Revert "remove wildcard"

This reverts commit 09976848c56925f29a57533d3a5bb7fea3d45362.
---
 helm/common/templates/_db_setup_job.tpl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl
index 9e19d981c..9ea67dbea 100644
--- a/helm/common/templates/_db_setup_job.tpl
+++ b/helm/common/templates/_db_setup_job.tpl
@@ -13,7 +13,7 @@ metadata:
 rules:
 - apiGroups: [""]
   resources: ["secrets"]
-  verbs: ["get"]
+  verbs: ["*"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding

From b678a8b7995849b58d1db269f461b1c274d01d7d Mon Sep 17 00:00:00 2001
From: paulmurdoch19
Date: Tue, 4 Feb 2025 12:38:40 -0800
Subject: [PATCH 025/126] add in changes

---
 .../amanuensis-clear-filter-set-cronjob.yaml | 102 ++++++++++++++++++
 helm/revproxy/nginx/helpers.js               |   1 -
 helm/revproxy/nginx/nginx.conf               |   6 +-
 tools/cronjob.sh                             |  22 ++++
 tools/pcdc                                   |   5 +
 5 files changed, 132 insertions(+), 4 deletions(-)
 create mode 100644 helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml
 create mode 100755 tools/cronjob.sh

diff --git a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml
new file mode 100644
index 000000000..e1a4e41eb
--- /dev/null
+++ b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml
@@ -0,0 +1,102 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: amanuensis-clear-unused-filter-sets
+  labels:
+    redeploy-hash: "{{ .Release.Revision }}"
+spec:
+  schedule: "0 0 1 * *"
+  concurrencyPolicy: Forbid
+  # This is default but do we want to keep long term record of deleted filter sets
+  # the original filter-sets will not be deleted unless manually done by the user
+  successfulJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          automountServiceAccountToken: false
+          volumes:
+            - name: yaml-merge
+              configMap:
+                name: "amanuensis-yaml-merge"
+                optional: true
+            - name: config-volume
+              secret:
+                secretName: "amanuensis-config"
+            - name: amanuensis-volume
+              secret:
+                secretName: "amanuensis-creds"
+            - name: tmp-pod
+              emptyDir: {}
+          containers:
+            - name: amanuensis
+              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+              imagePullPolicy: {{ .Values.image.pullPolicy }}
+              env:
+                - name: PGHOST
+                  valueFrom:
+                    secretKeyRef:
+                      name: amanuensis-dbcreds
+                      key: host
+                      optional: false
+                - name: PGUSER
+                  valueFrom:
+                    secretKeyRef:
+                      name: amanuensis-dbcreds
+                      key: username
+                      optional: false
+                - name: PGPASSWORD
+                  valueFrom:
+                    secretKeyRef:
+                      name: amanuensis-dbcreds
+                      key: password
+                      optional: false
+                - name: PGDB
+                  valueFrom:
+                    secretKeyRef:
+                      name: amanuensis-dbcreds
+                      key: database
+                      optional: false
+                - name: DBREADY
+                  valueFrom:
+                    secretKeyRef:
+                      name: amanuensis-dbcreds
+                      key: dbcreated
+                      optional: false
+                - name: DB
+                  value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB)
+                - name: AMANUENSIS_DB
+                  value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB)
+                - name: PYTHONPATH
+                  value: /var/www/amanuensis
+                - name: AMANUENSIS_PUBLIC_CONFIG
+                  valueFrom:
+                    configMapKeyRef:
+                      name: manifest-amanuensis
+                      key: amanuensis-config-public.yaml
+                      optional: true
+              volumeMounts:
+                - name: "config-volume"
+                  readOnly: true
+                  mountPath: "/var/www/amanuensis/amanuensis-config.yaml"
+                  subPath: amanuensis-config.yaml
+                - name: "yaml-merge"
+                  readOnly: true
+                  mountPath: "/var/www/amanuensis/yaml_merge.py"
+                  subPath: yaml_merge.py
+              command: ["/bin/bash"]
+              args:
+                - "-c"
+                - |
+                  echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml"
+                  python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml
+                  cd /amanuensis
+                  clear-old-filter-sets
+                  if [[ $? != 0 ]]; then
+                    echo "WARNING: non zero exit code: $?"
+                    exit 1
+                  fi
+          restartPolicy: Never
\ No newline at end of file
diff --git a/helm/revproxy/nginx/helpers.js b/helm/revproxy/nginx/helpers.js
index eaee1b1df..27b63aa69 100644
--- a/helm/revproxy/nginx/helpers.js
+++ b/helm/revproxy/nginx/helpers.js
@@ -280,4 +280,3 @@ function gen3_workspace_authorize_handler(req) {
   }
 }
 
-export default {userid, isCredentialsAllowed};
diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf
index 4be081dd5..a2f41980d 100644
--- a/helm/revproxy/nginx/nginx.conf
+++ b/helm/revproxy/nginx/nginx.conf
@@ -65,8 +65,8 @@ http {
   # # Note - nginscript js_set, etc get processed
   # #    on demand: https://www.nginx.com/blog/introduction-nginscript/
   # #
   #
-  js_import helpers.js;
-  js_set $userid helpers.userid;
+  js_include helpers.js;
+  js_set $userid userid;
 
   perl_set $document_url_env 'sub { return $ENV{"DOCUMENT_URL"} || ""; }';
@@ -134,7 +134,7 @@ http {
   # # CORS Credential White List
   # ##
   perl_set $origins_allow_credentials 'sub { return $ENV{"ORIGINS_ALLOW_CREDENTIALS"}; }';
-  js_set $credentials_allowed helpers.isCredentialsAllowed;
+  js_set $credentials_allowed isCredentialsAllowed;
 
   # ## For multi-domain deployments
   perl_set $csrf_cookie_domain 'sub { return $ENV{"COOKIE_DOMAIN"} ? qq{;domain=$ENV{"COOKIE_DOMAIN"}} : ""; }';
diff --git a/tools/cronjob.sh b/tools/cronjob.sh
new file mode 100755
index 000000000..a9d5967c4
--- /dev/null
+++ b/tools/cronjob.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Check if argument is provided
+if [ $# -eq 0 ]; then
+  echo "Usage: $0 "
+  exit 1
+fi
+
+# Extract the job name
+job_name="$1"
+
+# Delete the specified Job
+kubectl delete cronjob "$job_name"
+
+# Check if the deletion was successful
+if [ $?
-ne 0 ]; then + echo "Error: Failed to delete Job $job_name" + exit 1 +fi + +# Run roll.sh script +roll.sh \ No newline at end of file diff --git a/tools/pcdc b/tools/pcdc index 6f1c1a615..4f7c3a477 100755 --- a/tools/pcdc +++ b/tools/pcdc @@ -10,6 +10,7 @@ CLEAR_ELASTICSEARCH="clear_elasticsearch.sh" RESTART_POD="restart_pod.sh" LOGS="logs.sh" LOAD_DATA="load_data.sh" +CRONJOB_SCRIPT="cronjob.sh" # Check if command is provided as an argument @@ -59,6 +60,10 @@ case "$command" in # Run the connect_to_pod.sh script with the remaining arguments "$LOAD_DATA" "$@" ;; + "cronjob") + # Run the CRONJOB_SCRIPT.sh script with the remaining arguments + "$CRONJOB_SCRIPT" "$@" + ;; *) echo "Invalid command: $command" exit 1 From 63e0f66b9d9ef077266b49092729f141d1cd6f35 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 4 Feb 2025 15:00:05 -0800 Subject: [PATCH 026/126] Update pcdc-default-values.yaml --- pcdc-default-values.yaml | 2061 +++----------------------------------- 1 file changed, 147 insertions(+), 1914 deletions(-) diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 43e345bc1..31487a65b 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -3,7 +3,7 @@ global: hostname: localhost portalApp: pcdc dictionaryUrl: https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json - authz_entity_name: subject + authz_entity_name: "subject" tls: cert: | -----BEGIN CERTIFICATE----- @@ -57,106 +57,15 @@ global: arborist: image: repository: quay.io/pcdc/arborist - tag: 2023.08 + tag: "2025.01" amanuensis: image: - repository: quay.io/pcdc/amanuensis - tag: "2.16.1" + repository: "amanuensis" + tag: "test" + pullPolicy: Never -fence: - volumes: - - name: old-config-volume - secret: - secretName: "fence-secret" - - name: json-secret-volume - secret: - secretName: "fence-json-secret" - optional: true - - name: creds-volume - secret: - secretName: "fence-creds" - - name: config-helper - configMap: - name: config-helper - optional: true - - name: logo-volume - configMap: - name: "logo-config" - - name: config-volume - secret: - secretName: "fence-config" - - name: fence-google-app-creds-secret-volume - secret: - secretName: "fence-google-app-creds-secret" - - name: fence-google-storage-creds-secret-volume - secret: - secretName: "fence-google-storage-creds-secret" - - name: fence-jwt-keys - secret: - secretName: "fence-jwt-keys" - - name: privacy-policy - configMap: - name: "privacy-policy" - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - optional: true - - name: amanuensis-jwt-keys - secret: - secretName: "amanuensis-jwt-keys" - items: - - key: jwt_public_key.pem - path: jwt_public_key.pem - volumeMounts: - - name: "old-config-volume" - readOnly: true - mountPath: "/var/www/fence/local_settings.py" - subPath: local_settings.py - - name: "json-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_credentials.json" - subPath: fence_credentials.json - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/fence/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/fence/config_helper.py" - subPath: config_helper.py - - name: "logo-volume" - readOnly: true - mountPath: "/fence/fence/static/img/logo.svg" - subPath: "logo.svg" - - name: "privacy-policy" - readOnly: true - mountPath: "/fence/fence/static/privacy_policy.md" - subPath: "privacy_policy.md" - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config.yaml" - subPath: fence-config.yaml - - 
name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-google-app-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_app_creds_secret.json" - subPath: fence_google_app_creds_secret.json - - name: "fence-google-storage-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" - subPath: fence_google_storage_creds_secret.json - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_private_key.pem" - subPath: "jwt_private_key.pem" - - name: "amanuensis-jwt-keys" - readOnly: true - mountPath: "/amanuensis/jwt_public_key.pem" - subPath: "jwt_public_key.pem" +fence: FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true @@ -167,453 +76,66 @@ fence: image: - repository: quay.io/pcdc/fence - tag: "helm-test" - pullPolicy: Always + repository: "fence" + tag: "test" + pullPolicy: Never USER_YAML: | - cloud_providers: {} - groups: {} authz: - # policies automatically given to anyone, even if they haven't authenticated - anonymous_policies: ['open_data_reader', 'full_open_access'] - # policies automatically given to authenticated users (in addition to their other - # policies) - all_users_policies: ['open_data_reader', 'authn_open_access'] - - user_project_to_resource: - QA: /programs/QA - DEV: /programs/DEV - test: /programs/QA/projects/test - jenkins: /programs/jnkns/projects/jenkins - jenkins2: /programs/jnkns/projects/jenkins2 - jnkns: /programs/jnkns + resources: + - name: 'data_file' + description: 'data files, stored in S3' + - name: 'sower' + description: 'sower resource' + - name: workspace + description: jupyter notebooks + - name: analysis + description: analysis tool service + - name: portal + description: data portal service + - name: privacy + description: User privacy policy + - name: 'services' + subresources: + - name: 'sheepdog' + subresources: + - name: 'submission' + subresources: + - name: 'program' + - name: 'project' + - name: 'amanuensis' + - name: 'fence' + subresources: + - name: 'admin' + - name: programs + subresources: + - name: pcdc policies: - # General Access - - id: 'workspace' - description: 'be able to use workspace' - resource_paths: ['/workspace'] - role_ids: ['workspace_user'] - - id: 'dashboard' - description: 'be able to use the commons dashboard' - resource_paths: ['/dashboard'] - role_ids: ['dashboard_user'] - - id: 'prometheus' - description: 'be able to use prometheus' - resource_paths: ['/prometheus'] - role_ids: ['prometheus_user'] - - id: 'ttyadmin' - description: 'be able to use the admin tty' - resource_paths: ['/ttyadmin'] - role_ids: ['ttyadmin_user'] - - id: 'mds_admin' - description: 'be able to use metadata service' - resource_paths: ['/mds_gateway'] - role_ids: ['mds_user'] - id: 'data_upload' description: 'upload raw data files to S3' - role_ids: ['file_uploader'] - resource_paths: ['/data_file'] - - description: be able to use sower job - id: sower - resource_paths: [/sower] - role_ids: [sower_user] - - id: 'mariner_admin' - description: 'full access to mariner API' - resource_paths: ['/mariner'] - role_ids: ['mariner_admin'] - - id: audit_reader - role_ids: - - audit_reader - resource_paths: - - /services/audit - - id: audit_login_reader - role_ids: - - audit_reader - resource_paths: - - /services/audit/login - - id: audit_presigned_url_reader - role_ids: - - audit_reader - resource_paths: - - /services/audit/presigned_url - - id: requestor_admin - role_ids: - - requestor_admin - 
resource_paths: - - /programs - - id: requestor_reader - role_ids: - - requestor_reader - resource_paths: - - /programs - - id: requestor_creator - role_ids: - - requestor_creator - resource_paths: - - /programs - - id: requestor_updater - role_ids: - - requestor_updater - resource_paths: - - /programs - - id: requestor_deleter - role_ids: - - requestor_deleter - resource_paths: - - /programs - # Data Access - - # All programs policy - - id: 'all_programs_reader' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: ['/programs'] - - # # example if need access to write to storage - # - id: 'programs.jnkns-storage_writer' - # description: '' - # role_ids: - # - 'storage_writer' - # resource_paths: ['/programs/jnkns'] - - - id: 'programs.jnkns-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/programs/jnkns' - - '/gen3/programs/jnkns' - - - id: 'programs.jnkns-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/jnkns' - - '/gen3/programs/jnkns' - - - - id: 'programs.QA-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/programs/QA' - - '/gen3/programs/QA' - - - id: 'programs.QA-admin-no-storage' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - resource_paths: - - '/programs/QA' - - '/gen3/programs/QA' - - - id: 'programs.QA-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/QA' - - '/gen3/programs/QA' - - - id: 'programs.DEV-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - - 'storage_writer' - resource_paths: - - '/programs/DEV' - - '/gen3/programs/DEV' - - - id: 'programs.DEV-storage_writer' - description: '' - role_ids: - - 'storage_writer' - resource_paths: ['/programs/DEV'] - - - id: 'programs.DEV-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/DEV' - - '/gen3/programs/DEV' - - - id: 'programs.test-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/programs/test' - - '/gen3/programs/test' - - - id: 'programs.test-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/test' - - '/gen3/programs/test' - - - id: 'abc-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/abc' - - - id: 'gen3-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/gen3' - - - id: 'gen3-hmb-researcher' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - '/gen3' - - - id: 'abc.programs.test_program.projects.test_project1-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/abc/programs/test_program/projects/test_project1' - - - id: 'abc.programs.test_program.projects.test_project2-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/abc/programs/test_program/projects/test_project2' - - - id: 
'abc.programs.test_program2.projects.test_project3-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/abc/programs/test_program2/projects/test_project3' - - # Open data policies - - id: 'authn_open_access' - resource_paths: ['/programs/open/projects/authnRequired'] - description: '' - role_ids: - - 'reader' - - 'storage_reader' - - id: 'full_open_access' - resource_paths: ['/programs/open/projects/1000G'] - description: '' - role_ids: - - 'reader' - - 'storage_reader' - - id: 'open_data_reader' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: ['/open'] - - id: 'open_data_admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_writer' - - 'storage_reader' - resource_paths: ['/open'] - - # Consent Code Policies - - id: 'not-for-profit-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NPU' - - - id: 'publication-required-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/PUB' - - - id: 'gru-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - - id: 'gru-cc-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - - id: 'hmb-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - - id: 'poa-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/POA' - - - id: 'ds-lung-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - '/consents/DS_LungDisease' - - - id: 'ds-chronic-obstructive-pulmonary-disease-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - '/consents/DS_ChronicObstructivePulmonaryDisease' - - - id: 'services.sheepdog-admin' - description: 'CRUD access to programs and projects' - role_ids: - - 'sheepdog_admin' - resource_paths: - - '/services/sheepdog/submission/program' - - '/services/sheepdog/submission/project' - - # indexd - - id: 'indexd_admin' - description: 'full access to indexd API' - role_ids: - - 'indexd_admin' - resource_paths: - - '/programs' - - '/services/indexd/admin' - # # TODO resource path '/' is not valid right now in arborist, trying to decide - # # how to handle all resources - # - id: 'indexd_admin' - # description: '' - # role_ids: - # - 'indexd_record_creator' - # - 'indexd_record_reader' - # - 'indexd_record_updater' - # - 'indexd_delete_record' - # - 'indexd_storage_reader' - # - 'indexd_storage_writer' - # resource_paths: ['/'] - # - id: 'indexd_record_reader' - # description: '' - # role_ids: - # - 'indexd_record_reader' - # resource_paths: ['/'] - # - id: 'indexd_record_editor' - # description: '' - # role_ids: - # - 'indexd_record_creator' - # - 'indexd_record_reader' - # - 'indexd_record_updater' - # - 'indexd_delete_record' - # resource_paths: ['/'] - # - id: 'indexd_storage_reader' - # description: '' - # role_ids: - # - 'indexd_storage_reader' - # resource_paths: ['/'] - # - id: 'indexd_storage_editor' - # description: '' - # role_ids: - # - 'indexd_storage_reader' - # - 'indexd_storage_writer' - # 
resource_paths: ['/'] - - # argo - - id: argo - description: be able to use argo - resource_paths: [/argo] - role_ids: [argo_user] - - #PCDC specific + resource_paths: + - /data_file + role_ids: + - file_uploader - id: 'services.amanuensis-admin' description: 'admin access to amanuensis' role_ids: - 'amanuensis_admin' resource_paths: - '/services/amanuensis' + - id: 'services.fence-admin' + description: 'admin access to fence' + role_ids: + - 'fence_admin' + resource_paths: + - '/services/fence/admin' + - id: workspace + description: be able to use workspace + resource_paths: + - /workspace + role_ids: + - workspace_user - id: analysis description: be able to use analysis tool service resource_paths: @@ -626,6 +148,29 @@ fence: - /privacy role_ids: - reader + - id: indexd_admin + description: full access to indexd API + role_ids: + - indexd_admin + resource_paths: + - /programs + - description: be able to use sower job + id: sower + resource_paths: [/sower] + role_ids: [sower_user] + - id: 'services.sheepdog-admin' + description: 'CRUD access to programs and projects' + role_ids: + - 'sheepdog_admin' + resource_paths: + - '/services/sheepdog/submission/program' + - '/services/sheepdog/submission/project' + - id: all_programs_reader + role_ids: + - reader + - storage_reader + resource_paths: + - /programs - id: login_no_access role_ids: - reader @@ -638,1486 +183,174 @@ fence: resource_paths: - /programs - /programs/pcdc - resources: - # General Access - - name: 'data_file' - description: 'data files, stored in S3' - - name: 'dashboard' - description: 'commons /dashboard' - - name: 'mds_gateway' - description: 'commons /mds-admin' - - name: 'prometheus' - description: 'commons /prometheus and /grafana' - - name: 'ttyadmin' - description: 'commons /ttyadmin' - - name: 'workspace' - description: jupyter notebooks - - name: "sower" - description: 'sower resource' - - name: 'mariner' - description: 'workflow execution service' - - name: argo - #PCDC - - name: analysis - description: analysis tool service - - name: portal - description: data portal service - - name: privacy - description: User privacy policy - # OLD Data - - name: 'programs' - subresources: - #PCDC - - name: pcdc - - name: 'open' - subresources: - - name: 'projects' - subresources: - - name: '1000G' - - name: 'authnRequired' - - name: 'QA' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'DEV' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'jnkns' - subresources: - - name: 'projects' - subresources: - - name: 'jenkins' - - name: 'jenkins2' - - name: 'test' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - # NEW Data WITH PREFIX - - name: 'gen3' - subresources: - - name: 'programs' - subresources: - - name: 'QA' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'DEV' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'jnkns' - subresources: - - name: 'projects' - subresources: - - name: 'jenkins' - - name: 'jenkins2' - - name: 'test' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - # consents obtained from DUO and NIH - # https://github.com/EBISPOT/DUO - # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4721915/ - - name: 'consents' - subresources: - - name: 'NRES' - description: 'no restriction' - - name: 'GRU' - description: 'general research use' - - name: 'GRU_CC' - description: 'general research use and clinical care' - - name: 'HMB' - description: 
'health/medical/biomedical research' - - name: 'POA' - description: 'population origins or ancestry research' - - name: 'NMDS' - description: 'no general methods research' - - name: 'NPU' - description: 'not-for-profit use only' - - name: 'PUB' - description: 'publication required' - - name: 'DS_LungDisease' - description: 'disease-specific research for lung disease' - - name: 'DS_ChronicObstructivePulmonaryDisease' - description: 'disease-specific research for chronic obstructive pulmonary disease' - - - name: 'abc' - subresources: - - name: 'programs' - subresources: - - name: 'foo' - subresources: - - name: 'projects' - subresources: - - name: 'bar' - - name: 'test_program' - subresources: - - name: 'projects' - subresources: - - name: 'test_project1' - - name: 'test_project2' - - name: 'test_program2' - subresources: - - name: 'projects' - subresources: - - name: 'test_project3' - - - # "Sheepdog admin" resources - - name: 'services' - subresources: - - name: 'sheepdog' - subresources: - - name: 'submission' - subresources: - - name: 'program' - - name: 'project' - #PCDC - - name: 'amanuensis' - - name: 'indexd' - subresources: - - name: 'admin' - - name: 'bundles' - - name: audit - subresources: - - name: presigned_url - - name: login - - - name: 'open' - - # action/methods: - # create, read, update, delete, read-storage, write-storage, - # file_upload, access + + roles: - # General Access - id: 'file_uploader' description: 'can upload data files' permissions: - id: 'file_upload' action: - service: '*' - method: 'file_upload' - - id: 'workspace_user' - permissions: - - id: 'workspace_access' - action: - service: 'jupyterhub' - method: 'access' - - id: 'dashboard_user' + service: 'fence' + method: 'file_upload' + - id: 'amanuensis_admin' + description: 'can do admin work on project/data request' permissions: - - id: 'dashboard_access' - action: - service: 'dashboard' - method: 'access' - - id: 'mds_user' + - id: 'amanuensis_admin_action' + action: + service: 'amanuensis' + method: '*' + - id: 'fence_admin' + description: 'can use the admin endpoint in Fence' permissions: - - id: 'mds_access' - action: - service: 'mds_gateway' - method: 'access' - - id: 'prometheus_user' + - id: 'fence_admin_permission' + action: + service: 'fence' + method: '*' + - id: workspace_user permissions: - - id: 'prometheus_access' - action: - service: 'prometheus' - method: 'access' - - id: 'ttyadmin_user' + - action: {method: access, service: jupyterhub} + id: workspace_access + - id: sower_user permissions: - - id: 'ttyadmin_access' - action: - service: 'ttyadmin' - method: 'access' - - id: 'sower_user' + - action: {method: access, service: job} + id: sower_access + - id: analysis_user permissions: - - id: 'sower_access' - action: - service: 'job' - method: 'access' - - id: 'mariner_admin' - permissions: - - id: 'mariner_access' - action: - service: 'mariner' - method: 'access' - - id: 'audit_reader' + - action: {method: access, service: analysis} + id: analysis_access + # Sheepdog admin role + - id: 'sheepdog_admin' + description: 'sheepdog admin role for program project crud' permissions: - - id: 'audit_reader_action' + - id: 'sheepdog_admin_action' action: - service: 'audit' - method: 'read' - - id: 'analysis_user' + service: 'sheepdog' + method: '*' + - id: indexd_admin + description: full access to indexd API permissions: - - action: {method: 'access', service: 'analysis'} - id: 'analysis_access' - # All services - - id: 'admin' - description: '' + - id: indexd_admin + action: + service: indexd + 
method: '*' + - id: admin permissions: - - id: 'admin' + - id: admin action: service: '*' method: '*' - - id: 'creator' - description: '' + - id: creator permissions: - - id: 'creator' + - id: creator action: service: '*' - method: 'create' - - id: 'reader' - description: '' + method: create + - id: reader permissions: - - id: 'reader' + - id: reader action: service: '*' - method: 'read' - - id: 'updater' - description: '' + method: read + - id: updater permissions: - - id: 'updater' + - id: updater action: service: '*' - method: 'update' - - id: 'deleter' - description: '' + method: update + - id: deleter permissions: - - id: 'deleter' + - id: deleter action: service: '*' - method: 'delete' - - id: 'storage_writer' - description: '' + method: delete + - id: storage_writer permissions: - - id: 'storage_writer' + - id: storage_creator action: service: '*' - method: 'write-storage' - - id: 'storage_reader' - description: '' + method: write-storage + - id: storage_reader permissions: - - id: 'storage_reader' + - id: storage_reader action: service: '*' - method: 'read-storage' - - - # Sheepdog admin role - - id: 'sheepdog_admin' - description: 'sheepdog admin role for program project crud' - permissions: - - id: 'sheepdog_admin_action' - action: - service: 'sheepdog' - method: '*' - - - # indexd - - id: 'indexd_admin' - # this only works if indexd.arborist is enabled in manifest! - description: 'full access to indexd API' - permissions: - - id: 'indexd_admin' - action: - service: 'indexd' - method: '*' - - id: 'indexd_record_creator' - description: '' - permissions: - - id: 'indexd_record_creator' - action: - service: 'indexd' - method: 'create' - - id: 'indexd_record_reader' - description: '' - permissions: - - id: 'indexd_record_reader' - action: - service: 'indexd' - method: 'read' - - id: 'indexd_record_updater' - description: '' - permissions: - - id: 'indexd_record_updater' - action: - service: 'indexd' - method: 'update' - - id: 'indexd_delete_record' - description: '' - permissions: - - id: 'indexd_delete_record' - action: - service: 'indexd' - method: 'delete' - - id: 'indexd_storage_reader' - description: '' - permissions: - - id: 'indexd_storage_reader' - action: - service: 'indexd' - method: 'read-storage' - - id: 'indexd_storage_writer' - description: '' - permissions: - - id: 'indexd_storage_writer' - action: - service: 'indexd' - method: 'write-storage' - - # arborist - - id: 'arborist_creator' - description: '' - permissions: - - id: 'arborist_creator' - action: - service: 'arborist' - method: 'create' - - id: 'arborist_reader' - description: '' - permissions: - - id: 'arborist_reader' - action: - service: 'arborist' - method: 'read' - - id: 'arborist_updater' - description: '' - permissions: - - id: 'arborist_updater' - action: - service: 'arborist' - method: 'update' - - id: 'arborist_deleter' - description: '' - permissions: - - id: 'arborist_deleter' - action: - service: 'arborist' - method: 'delete' - - # requestor - - id: requestor_admin - permissions: - - id: requestor_admin_action - action: - service: requestor - method: '*' - - id: requestor_reader - permissions: - - id: requestor_reader_action - action: - service: requestor - method: read - - id: requestor_creator - permissions: - - id: requestor_creator_action - action: - service: requestor - method: create - - id: requestor_updater - permissions: - - id: requestor_updater_action - action: - service: requestor - method: update - - id: requestor_deleter - permissions: - - id: requestor_deleter_action - action: - 
service: requestor - method: delete - # argo - - id: argo_user - permissions: - - id: argo_access - action: - service: argo - method: access - #PCDC specific - #amanuensis - - id: 'amanuensis_admin' - description: 'can do admin work on project/data request' - permissions: - - id: 'amanuensis_admin_action' - action: - service: 'amanuensis' - method: '*' - clients: - basic-test-client: - policies: - - abc-admin - - gen3-admin - basic-test-abc-client: - policies: - - abc-admin - wts: - policies: - - all_programs_reader - - workspace - + method: read-storage + users: ### BEGIN INTERNS SECTION ### ### END INTERNS SECTION ### - qureshi@uchicago.edu: - admin: true - policies: - - data_upload - - workspace - - dashboard - - mds_admin - - prometheus - - sower - - services.sheepdog-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] pmurdoch@uchicago.edu: admin: true policies: - data_upload - workspace - - dashboard - - mds_admin - - prometheus - - sower - - services.sheepdog-admin - - services.amanuensis-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin - - data_admin - - analysis - - privacy_policy - - login_no_access - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] - test@example.com: - admin: true - policies: - - data_upload - - workspace - - dashboard - - mds_admin - - prometheus - - sower - services.sheepdog-admin - services.amanuensis-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin + - services.fence-admin - data_admin - analysis - privacy_policy - login_no_access - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] - furner.brian@gmail.com: - admin: true - policies: - - data_upload - - workspace - - dashboard - - mds_admin - - prometheus - sower - - services.sheepdog-admin - - services.amanuensis-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin 
- - data_admin - - analysis - - privacy_policy - - login_no_access - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] + guppy: enabled: true image: repository: quay.io/pcdc/guppy - tag: 1.8.0 + tag: 1.9.1 authFilterField: "auth_resource_path" manifestservice: image: repository: quay.io/cdis/manifestservice - tag: 2023.08 + tag: "2025.01" pcdcanalysistools: image: repository: quay.io/pcdc/pcdcanalysistools - tag: 1.8.4 + tag: "1.8.9" peregrine: image: repository: quay.io/pcdc/peregrine - tag: "1.3.5" + tag: "1.3.10" portal: image: repository: quay.io/pcdc/windmill - tag: 1.25.0 + tag: 1.34.0 resources: requests: cpu: 1.0 gitops: - json: | - { - "gaTrackingId": "undefined", - "graphql": { - "boardCounts": [ - { - "graphql": "_person_count", - "name": "Person", - "plural": "Persons" - }, - { - "graphql": "_subject_count", - "name": "Subject", - "plural": "Subjects" - } - ], - "chartCounts": [ - { - "graphql": "_person_count", - "name": "Person" - }, - { - "graphql": "_subject_count", - "name": "Subject" - } - ], - "projectDetails": "boardCounts" - }, - "components": { - "appName": "Pediatric Cancer Data Commons Portal", - "index": { - "introduction": { - "heading": "Pediatric Cancer Data Commons", - "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", - "link": "/submission" - }, - "buttons": [ - { - "name": "Define Data Field", - "icon": "data-field-define", - "body": "The Pediatric Cancer Data Commons defines the data. Please study the dictionary before you start browsing.", - "link": "/DD", - "label": "Learn more" - }, - { - "name": "Explore Data", - "icon": "data-explore", - "body": "The Exploration Page gives you insights and a clear overview under selected factors.", - "link": "/explorer", - "label": "Explore data" - }, - { - "name": "Access Data", - "icon": "data-access", - "body": "Use our selected tool to filter out the data you need.", - "link": "/query", - "label": "Query data" - } - ], - "barChart": { - "showPercentage": true - } - }, - "navigation": { - "items": [ - { - "icon": "dictionary", - "link": "/DD", - "color": "#a2a2a2", - "name": "Dictionary" - }, - { - "icon": "exploration", - "link": "/explorer", - "color": "#a2a2a2", - "name": "Exploration" - }, - { - "icon": "query", - "link": "/query", - "color": "#a2a2a2", - "name": "Query" - } - ] - }, - "topBar": { - "items": [ - { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/pcdc/", - "name": "About PCDC" - }, - { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/sponsors/", - "name": "Our Sponsors" - } - ] - }, - "login": { - "title": "Pediatric Cancer Data Commons", - "subTitle": "Connect. Share. Cure.", - "text": "The Pediatric Cancer Data Commons (PCDC) harnesses pediatric cancer clinical data from around the globe into a single combined platform, connecting the data to other sources and making it available to clinicians and researchers everywhere. 
Headquartered at the University of Chicago, the PCDC team works with international leaders in pediatric cancers to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources. The PCDC currently houses the world's largest sets of clinical data for pediatric neuroblastoma and soft tissue sarcoma and is in the process of onboarding additional pediatric cancer disease groups.", - "contact": "If you have any questions about access or the registration process, please contact ", - "email": "pcdc_help@lists.uchicago.edu" - }, - "footerLogos": [ - { - "src": "/src/img/gen3.png", - "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons", - "height": 40 - }, - { - "src": "/src/img/uchicago.png", - "href": "https://www.uchicago.edu/", - "alt": "The University of Chicago", - "height": 40 - } - ] - }, - "explorerConfig": [ - { - "id": 1, - "label": "data", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - }, - "consortium": { - "chartType": "bar", - "title": "Consortium" - } - }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "data_contributor_id", - "studies.study_id", - "studies.treatment_arm", - "sex", - "race", - "ethnicity", - "year_at_disease_phase", - "survival_characteristics.lkss_obfuscated", - "censor_status", - "age_at_censor_status", - "medical_histories.medical_history", - "medical_histories.medical_history_status", - "external_references.external_resource_name" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "histologies.histology_grade", - "histologies.histology_inpc", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.tumor_state", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.depth", - "tumor_assessments.tumor_size", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "tumor_assessments.necrosis", - "tumor_assessments.necrosis_pct", - "tumor_assessments.tumor_laterality", - "stagings.irs_group", - "stagings.tnm_finding", - "stagings.stage_system", - "stagings.stage", - "stagings.AB", - "stagings.E", - "stagings.S", - "disease_characteristics.mki", - "disease_characteristics.bulk_disease", - "disease_characteristics.BULK_MED_MASS", - "disease_characteristics.bulky_nodal_aggregate" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - "molecular_analysis.gene1", - "molecular_analysis.gene2", - "molecular_analysis.dna_index", - "molecular_analysis.age_at_molecular_analysis", - "molecular_analysis.mitoses", - "molecular_analysis.cytodifferentiation" - ] - }, - { - "title": "Surgery", - "fields": [ - "biopsy_surgical_procedures.tumor_classification", - "biopsy_surgical_procedures.procedure_type", - "biopsy_surgical_procedures.margins" - ] - }, - { - "title": "Radiation", - "fields": [ - 
"radiation_therapies.tumor_classification", - "radiation_therapies.energy_type", - "radiation_therapies.rt_dose" - ] - }, - { - "title": "Response", - "fields": [ - "subject_responses.tx_prior_response", - "subject_responses.response", - "subject_responses.interim_response", - "subject_responses.response_method" - ] - }, - { - "title": "SMN", - "fields": [ - "secondary_malignant_neoplasm.age_at_smn", - "secondary_malignant_neoplasm.smn_site", - "secondary_malignant_neoplasm.smn_type", - "secondary_malignant_neoplasm.smn_morph_icdo" - ] - }, - { - "title": "Imaging", - "fields": [ - "imagings.imaging_method", - "imagings.imaging_result" - ] - }, - { - "title": "Labs", - "fields": [ - "labs.lab_test", - "labs.lab_result", - "labs.lab_result_numeric", - "labs.lab_result_unit" - ] - }, - { - "title": "SCT", - "fields": [ - "stem_cell_transplants.sct_type" - ] - } - ] - }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": true, - "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" - ] - }, - "patientIds": { - "filter": false, - "export": true - }, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } - }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "data_contributor_id", - "name": "Data Contributor", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.study_id", - "name": "Study Id", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.treatment_arm", - "name": "Treatment Arm", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." 
- }, - { - "field": "year_at_disease_phase", - "name": "Year at Initial Diagnosis" - }, - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "survival_characteristics.lkss_obfuscated", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "medical_histories.medical_history", - "name": "Medical History" - }, - { - "field": "medical_histories.medical_history_status", - "name": "Medical History Status" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "INPC Classification" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_state", - "name": "Tumor State" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.depth", - "name": "Tumor Depth" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": "tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": "tumor_assessments.necrosis", - "name": "Necrosis" - }, - { - "field": "tumor_assessments.necrosis_pct", - "name": "Necrosis PCT" - }, - { - "field": "tumor_assessments.tumor_laterality", - "name": "Tumor Laterality" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - }, - { - "field": "stagings.stage_system", - "name": "Stage System" - }, - { - "field": "stagings.stage", - "name": "Stage" - }, - { - "field": "stagings.AB", - "name": "Ann Arbor AB" - }, - { - "field": "stagings.E", - "name": "Ann Arbor E" - }, - { - "field": "stagings.S", - "name": "Ann Arbor S" - }, - { - "field": "disease_characteristics.mki", - "name": "MKI" - }, - { - "field": "disease_characteristics.bulk_disease", - "name": "Bulky Disease" - }, - { - "field": "disease_characteristics.BULK_MED_MASS", - "name": "Bulky Mediastinal Mass" - }, - { - "field": "disease_characteristics.bulky_nodal_aggregate", - "name": "Bulky Nodal Aggregate" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "molecular_analysis.dna_index", - "name": "DNA Index" - }, - { - "field": "molecular_analysis.age_at_molecular_analysis", - "name": "Age at Molecular Analysis" - }, - { - "field": "molecular_analysis.mitoses", - "name": "Mitoses" - }, - { - "field": 
"molecular_analysis.cytodifferentiation", - "name": "Cytodifferentiation" - }, - { - "field": "biopsy_surgical_procedures.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "biopsy_surgical_procedures.procedure_type", - "name": "Procedure Type" - }, - { - "field": "biopsy_surgical_procedures.procedure_site", - "name": "Procedure Site" - }, - { - "field": "biopsy_surgical_procedures.margins", - "name": "Margins" - }, - { - "field": "radiation_therapies.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "radiation_therapies.age_at_rt_start", - "name": "Age at Radiation Therapy" - }, - { - "field": "radiation_therapies.rt_site", - "name": "Radiation Site" - }, - { - "field": "radiation_therapies.energy_type", - "name": "Energy Type" - }, - { - "field": "radiation_therapies.rt_dose", - "name": "Radiation Dose" - }, - { - "field": "radiation_therapies.rt_unit", - "name": "Radiation Unit" - }, - { - "field": "subject_responses.age_at_response", - "name": "Age at Response" - }, - { - "field": "subject_responses.tx_prior_response", - "name": "Treatment Prior Response" - }, - { - "field": "subject_responses.response", - "name": "Response" - }, - { - "field": "subject_responses.interim_response", - "name": "Interim Response" - }, - { - "field": "subject_responses.response_method", - "name": "Response Method" - }, - { - "field": "subject_responses.necrosis", - "name": "Necrosis" - }, - { - "field": "secondary_malignant_neoplasm.age_at_smn", - "name": "Age at SMN" - }, - { - "field": "secondary_malignant_neoplasm.smn_site", - "name": "SMN Site" - }, - { - "field": "secondary_malignant_neoplasm.smn_type", - "name": "SMN Type" - }, - { - "field": "secondary_malignant_neoplasm.smn_morph_icdo", - "name": "ICD-O Morphology" - }, - { - "field": "imagings.imaging_method", - "name": "Imaging Method" - }, - { - "field": "imagings.imaging_result", - "name": "Imaging Result" - }, - { - "field": "labs.lab_result_numeric", - "name": "Numeric Lab Result" - }, - { - "field": "labs.lab_result_unit", - "name": "Lab Result Unit" - }, - { - "field": "labs.lab_result", - "name": "Lab Result" - }, - { - "field": "labs.lab_test", - "name": "Lab Test" - }, - { - "field": "stem_cell_transplants.sct_type", - "name": "SCT Type" - } - ] - }, - "dataRequests": { - "enabled": false - }, - "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" - }, - { - "id": 2, - "label": "data - survival", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - } - }, - "adminAppliedPreFilters": { - "consortium": { - "selectedValues": ["INSTRuCT"] - } - }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular"], - "tooltip": "You can describe this filter here" - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "stagings.irs_group", - 
"stagings.tnm_finding" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - "molecular_analysis.gene1", - "molecular_analysis.gene2" - ] - } - ] - }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": true, - "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" - ] - }, - "patientIds": { - "filter": false, - "export": true - }, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } - }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)", - "tooltip": "test tooltip" - }, - { - "field": "survival_characteristics.age_at_lkss", - "name": "Age at LKSS" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": "tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "Histology Inpc" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "project_id", - "name": "Data Release Version" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - } - ] - }, - "dataRequests": { - "enabled": false - }, - "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" - } - ] - } + json: "" revproxy: image: repository: quay.io/cdis/nginx - tag: 2023.09 + tag: 
"1.17.6-ctds-1.0.1" sheepdog: image: repository: quay.io/pcdc/sheepdog - tag: "1.5.6" + tag: "1.5.10" sower: image: repository: quay.io/cdis/sower - tag: 2024.04 + tag: "2025.01" wts: + enabled: false image: repository: quay.io/cdis/workspace-token-service - tag: 2024.04 - -elasticsearch: - enabled: true + tag: 2025.01 ######################################################################################## # DISABLED SERVICES # ######################################################################################## -gearbox: - enabled: false - -gearbox-middleware: - enabled: false ambassador: # -- (bool) Whether to deploy the ambassador subchart. From b8555e22be39eb6e7bf7149cc51f537038921418 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 14 Feb 2025 16:11:57 -0800 Subject: [PATCH 027/126] update amanuensis to fill config and creds secrets correctly and set up jwt between amanuensis and fence --- .../amanuensis-secret/config_helper.py | 126 ++++++- .../amanuensis-clear-filter-set-cronjob.yaml | 34 -- .../templates/amanuensis-config-job.yaml | 87 +++++ .../templates/amanuensis-config.yaml | 11 - .../templates/amanuensis-creds-job.yaml | 92 +++++ .../templates/amanuensis-creds.yaml | 18 - .../templates/amanuensis-db-migrate-job.yaml | 34 -- .../templates/amanuensis-jobs-sa.yaml | 26 ++ helm/amanuensis/values.yaml | 318 +----------------- helm/fence/templates/presigned-url-fence.yaml | 2 +- helm/fence/templates/useryaml-job.yaml | 2 +- helm/fence/values.yaml | 14 + pcdc-default-values.yaml | 21 +- tools/roll.sh | 33 +- 14 files changed, 391 insertions(+), 427 deletions(-) create mode 100644 helm/amanuensis/templates/amanuensis-config-job.yaml delete mode 100644 helm/amanuensis/templates/amanuensis-config.yaml create mode 100644 helm/amanuensis/templates/amanuensis-creds-job.yaml delete mode 100644 helm/amanuensis/templates/amanuensis-creds.yaml create mode 100644 helm/amanuensis/templates/amanuensis-jobs-sa.yaml diff --git a/helm/amanuensis/amanuensis-secret/config_helper.py b/helm/amanuensis/amanuensis-secret/config_helper.py index 081ef64d2..132de4b62 100644 --- a/helm/amanuensis/amanuensis-secret/config_helper.py +++ b/helm/amanuensis/amanuensis-secret/config_helper.py @@ -48,6 +48,55 @@ def load_json(file_name, app_name, search_folders=None): return json.load(reader) +def inject_creds_into_fence_config(creds_file_path, config_file_path): + creds_file = open(creds_file_path, "r") + creds = json.load(creds_file) + creds_file.close() + + # get secret values from creds.json file + db_host = _get_nested_value(creds, "db_host") + db_username = _get_nested_value(creds, "db_username") + db_password = _get_nested_value(creds, "db_password") + db_database = _get_nested_value(creds, "db_database") + hostname = _get_nested_value(creds, "hostname") + indexd_password = _get_nested_value(creds, "indexd_password") + google_client_secret = _get_nested_value(creds, "google_client_secret") + google_client_id = _get_nested_value(creds, "google_client_id") + hmac_key = _get_nested_value(creds, "hmac_key") + db_path = "postgresql://{}:{}@{}:5432/{}".format( + db_username, db_password, db_host, db_database + ) + + config_file = open(config_file_path, "r").read() + + print(" DB injected with value(s) from creds.json") + config_file = _replace(config_file, "DB", db_path) + + print(" BASE_URL injected with value(s) from creds.json") + config_file = _replace(config_file, "BASE_URL", "https://{}/user".format(hostname)) + + print(" INDEXD_PASSWORD injected with value(s) from creds.json") + config_file = 
_replace(config_file, "INDEXD_PASSWORD", indexd_password) + config_file = _replace(config_file, "INDEXD_USERNAME", "fence") + + print(" ENCRYPTION_KEY injected with value(s) from creds.json") + config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) + + print( + " OPENID_CONNECT/google/client_secret injected with value(s) " + "from creds.json" + ) + config_file = _replace( + config_file, "OPENID_CONNECT/google/client_secret", google_client_secret + ) + + print(" OPENID_CONNECT/google/client_id injected with value(s) from creds.json") + config_file = _replace( + config_file, "OPENID_CONNECT/google/client_id", google_client_id + ) + + open(config_file_path, "w+").write(config_file) + def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): creds_file = open(creds_file_path, "r") creds = json.load(creds_file) @@ -88,11 +137,16 @@ def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key", data_delivery_bucket_aws_access_key ) - print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET injected with value(s) from creds.json") + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name injected with value(s) from creds.json") config_file = _replace( - config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET", data_delivery_bucket, key_only=True + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name", data_delivery_bucket ) + # modify USER_API to http://user-service/ if hostname is localhost + + if hostname == "localhost": + print(" USER_API set to http://fence-service/") + config_file = _replace(config_file, "USER_API", "http://fence-service/") # print(" ENCRYPTION_KEY injected with value(s) from creds.json") # config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) @@ -100,9 +154,29 @@ def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): open(config_file_path, "w+").write(config_file) -def set_prod_defaults_amanuensis(config_file_path): +def set_prod_defaults(config_file_path): config_file = open(config_file_path, "r").read() + print( + " CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS set as " + "var/www/fence/fence_google_app_creds_secret.json" + ) + config_file = _replace( + config_file, + "CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS", + "/var/www/fence/fence_google_app_creds_secret.json", + ) + + print( + " CIRRUS_CFG/GOOGLE_STORAGE_CREDS set as " + "var/www/fence/fence_google_storage_creds_secret.json" + ) + config_file = _replace( + config_file, + "CIRRUS_CFG/GOOGLE_STORAGE_CREDS", + "/var/www/fence/fence_google_storage_creds_secret.json", + ) + print(" INDEXD set as http://indexd-service/") config_file = _replace(config_file, "INDEXD", "http://indexd-service/") @@ -135,6 +209,40 @@ def set_prod_defaults_amanuensis(config_file_path): open(config_file_path, "w+").write(config_file) +def set_prod_defaults_amanuensis(config_file_path): + config_file = open(config_file_path, "r").read() + + print(" INDEXD set as http://indexd-service/") + config_file = _replace(config_file, "INDEXD", "http://indexd-service/") + + print(" ARBORIST set as http://arborist-service/") + config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") + + print(" HTTP_PROXY/host set as cloud-proxy.internal.io") + config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") + + print(" HTTP_PROXY/port set as 3128") + config_file = _replace(config_file, "HTTP_PROXY/port", 3128) + + print(" DEBUG set to false") + config_file = 
_replace(config_file, "DEBUG", False) + + print(" MOCK_AUTH set to false") + config_file = _replace(config_file, "MOCK_AUTH", False) + + print(" MOCK_GOOGLE_AUTH set to false") + config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) + + print(" AUTHLIB_INSECURE_TRANSPORT set to true") + config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) + + print(" SESSION_COOKIE_SECURE set to true") + config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) + + print(" ENABLE_CSRF_PROTECTION set to true") + config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) + + open(config_file_path, "w+").write(config_file) def inject_other_files_into_fence_config(other_files, config_file_path): additional_cfgs = _get_all_additional_configs(other_files) @@ -251,7 +359,7 @@ def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level= return yaml_config # set new start point to past current match and move on to next match - start = matches.end(0) + start = start + matches.end(0) nested_level += 1 del nested_path_to_replace[0] @@ -351,7 +459,7 @@ def _get_nested_value(dictionary, nested_path): parser.add_argument( "--other_files_to_inject", nargs="+", - help="amanuensis_credentials.json, local_settings.py, amanuensis_settings.py file(s) to " + help="fence_credentials.json, local_settings.py, fence_settings.py file(s) to " "inject into the configuration yaml", ) parser.add_argument( @@ -359,8 +467,12 @@ def _get_nested_value(dictionary, nested_path): ) args = parser.parse_args() - inject_creds_into_amanuensis_config(args.creds_file_to_inject, args.config_file) - set_prod_defaults_amanuensis(args.config_file) + if args.config_file == "new-amanuensis-config.yaml": + inject_creds_into_amanuensis_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults_amanuensis(args.config_file) + else: + inject_creds_into_fence_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults(args.config_file) if args.other_files_to_inject: inject_other_files_into_fence_config( diff --git a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml index e1a4e41eb..6f8b48958 100644 --- a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml +++ b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml @@ -36,40 +36,6 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - - name: PGHOST - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: host - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: username - optional: false - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: password - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: database - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: dbcreated - optional: false - - name: DB - value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) - - name: AMANUENSIS_DB - value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) - name: PYTHONPATH value: /var/www/amanuensis - name: AMANUENSIS_PUBLIC_CONFIG diff --git a/helm/amanuensis/templates/amanuensis-config-job.yaml b/helm/amanuensis/templates/amanuensis-config-job.yaml new file mode 100644 index 000000000..9a4cb9984 --- /dev/null +++ 
b/helm/amanuensis/templates/amanuensis-config-job.yaml @@ -0,0 +1,87 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: amanuensis-config + labels: + redeploy-hash: "{{ .Release.Revision }}" +spec: + backoffLimit: 0 + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: "amanuensis-jobs" + volumes: + - name: shared-data + emptyDir: {} + - name: creds-volume + secret: + secretName: "amanuensis-creds" + - name: config-helper + secret: + secretName: "amanuensis-secret" + containers: + - name: amanuensis + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: Never + volumeMounts: + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/amanuensis/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/amanuensis/config_helper.py" + subPath: config_helper.py + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + # Script always succeeds if it runs (echo exits with 0) + - | + echo "generating default amanuensis configuration..." + python /amanuensis/cfg_help.py create --config_path new-amanuensis-config.yaml + + if [[ -f /var/www/amanuensis/creds.json ]]; then + echo "" + echo "injecting creds.json into amanuensis configuration..." + python /var/www/amanuensis/config_helper.py -i /var/www/amanuensis/creds.json -c new-amanuensis-config.yaml + else + echo "ERROR: /var/www/amanuensis/creds.json not found!" + echo " Only generating default config..." + fi + + cp new-amanuensis-config.yaml /mnt/shared/new-amanuensis-config.yaml + + - name: awshelper + image: "quay.io/cdis/awshelper:master" + imagePullPolicy: Always + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + # wait for other image to generate config + let count=0 + while [[ ! -f /mnt/shared/new-amanuensis-config.yaml && $count -lt 50 ]]; do + echo "waiting for /mnt/shared/new-amanuensis-config.yaml" + sleep 2 + let count=$count+1 + done + + if [[ -f /mnt/shared/new-amanuensis-config.yaml ]]; then + # load yaml file into secrets + if kubectl get secrets/amanuensis-config > /dev/null 2>&1; then + kubectl delete secret amanuensis-config + fi + echo "saving amanuensis configuration into amanuensis-config secret..." + kubectl create secret generic amanuensis-config "--from-file=amanuensis-config.yaml=/mnt/shared/new-amanuensis-config.yaml" + else + echo "/mnt/shared/new-amanuensis-config.yaml did not appear within timeout :-(" + fi + + restartPolicy: Never diff --git a/helm/amanuensis/templates/amanuensis-config.yaml b/helm/amanuensis/templates/amanuensis-config.yaml deleted file mode 100644 index 6939d1d72..000000000 --- a/helm/amanuensis/templates/amanuensis-config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: amanuensis-config -stringData: - amanuensis-config.yaml: | - BASE_URL: https://{{ .Values.global.hostname }}/amanuensis - DB: postgresql://{{ .Files.Get "secrets/amanuensis-dbcreds/username" | default "" }}:{{ .Files.Get "secrets/amanuensis-dbcreds/password" | default "" }}@{{ .Files.Get "secrets/amanuensis-dbcreds/host" | default "" }}:5432/{{ .Files.Get "secrets/amanuensis-dbcreds/database" | default "" }} - {{- with .Values.AMANUENSIS_CONFIG }} - {{- toYaml . 
| nindent 4 }} - {{ end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-creds-job.yaml b/helm/amanuensis/templates/amanuensis-creds-job.yaml new file mode 100644 index 000000000..b7b146d6a --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-creds-job.yaml @@ -0,0 +1,92 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: amanuensis-creds + labels: + redeploy-hash: "{{ .Release.Revision }}" +spec: + backoffLimit: 0 + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: "amanuensis-jobs" + volumes: + - name: shared-data + emptyDir: {} + containers: + - name: awshelper + image: "quay.io/cdis/awshelper:master" + imagePullPolicy: Always + env: + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: password + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: username + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: database + optional: false + - name: PGHOST + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: host + optional: false + - name: PGPORT + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: port + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: dbcreated + optional: false + - name: HOSTNAME + value: "{{ .Values.global.hostname }}" + - name: DATA_DOWNLOAD_BUCKET + value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.bucket_name }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.aws_access_key_id }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.aws_secret_access_key }}" + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + if kubectl get secret amanuensis-creds; then + kubectl delete secret amanuensis-creds + fi + cat < /mnt/shared/creds.json + { + "db_host": "${PGHOST}", + "db_username": "${PGUSER}", + "db_password": "${PGPASSWORD}", + "db_database": "${PGDB}", + "hostname": "${HOSTNAME}", + "indexd_password": "", + "data_delivery_bucket": "${DATA_DOWNLOAD_BUCKET}", + "data_delivery_bucket_aws_key_id": "${AWS_ACCESS_KEY_ID}", + "data_delivery_bucket_aws_access_key": "${AWS_SECRET_ACCESS_KEY}" + } + EOF + + kubectl create secret generic amanuensis-creds --from-file=/mnt/shared/creds.json + restartPolicy: Never diff --git a/helm/amanuensis/templates/amanuensis-creds.yaml b/helm/amanuensis/templates/amanuensis-creds.yaml deleted file mode 100644 index 638e1fe0b..000000000 --- a/helm/amanuensis/templates/amanuensis-creds.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: amanuensis-creds -type: Opaque -stringData: - creds.json: |- - { - "db_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" $.Chart.Name "context" $) }}", - "db_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" $.Chart.Name "context" $) }}", - "db_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) }}", - "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" $.Chart.Name "context" $)}}", - "hostname": "{{ .Values.global.hostname }}", - "indexd_password": "", - "data_delivery_bucket": {{ .Values.AMANUENSIS_CONFIG.DATA_DOWNLOAD_BUCKET }}, - "data_delivery_bucket_aws_key_id": {{ .Values.AMANUENSIS_CONFIG.AWS_CREDENTIALS.aws_access_key_id }}, - 
"data_delivery_bucket_aws_access_key": {{ .Values.AMANUENSIS_CONFIG.AWS_CREDENTIALS.aws_secret_access_key }} - } diff --git a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml index 9831fce14..8894b54c6 100644 --- a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml +++ b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml @@ -28,40 +28,6 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - - name: PGHOST - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: host - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: username - optional: false - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: password - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: database - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: dbcreated - optional: false - - name: DB - value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) - - name: AMANUENSIS_DB - value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) - name: PYTHONPATH value: /var/www/amanuensis - name: AMANUENSIS_PUBLIC_CONFIG diff --git a/helm/amanuensis/templates/amanuensis-jobs-sa.yaml b/helm/amanuensis/templates/amanuensis-jobs-sa.yaml new file mode 100644 index 000000000..7d32e3b4b --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-jobs-sa.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: amanuensis-jobs +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: amanuensis-jobs-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: amanuensis-jobs-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: amanuensis-jobs-role +subjects: + - kind: ServiceAccount + name: amanuensis-jobs + namespace: default \ No newline at end of file diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index aeb569296..ea123ac1e 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -178,38 +178,6 @@ env: name: manifest-amanuensis key: amanuensis-config-public.yaml optional: true - - name: PGHOST - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: host - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: username - optional: false - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: password - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: database - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: dbcreated - optional: false - - name: DB - value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) # -- (list) Volumes to attach to the container. volumes: @@ -278,286 +246,14 @@ datadogProfilingEnabled: true # -- (int) A value between 0 and 1, that represents the percentage of requests that will be traced. For example, a value of 0.5 means that 50% of requests will be traced. 
datadogTraceSampleRate: 1 - -AMANUENSIS_CONFIG: - APP_NAME: 'Gen3 Data Commons' - HOSTNAME: 'localhost' - # Where amanuensis microservice is deployed - # a standardized name unique to each app for service-to-service interaction - # so the service receiving the request knows it came from another Gen3 service - SERVICE_NAME: 'amanuensis' - # postgres db to connect to - # connection url format: - # postgresql://[user[:password]@][netloc][:port][/dbname] - # A URL-safe base64-encoded 32-byte key for encrypting keys in db - # in python you can use the following script to generate one: - # import base64 - # import os - # key = base64.urlsafe_b64encode(os.urandom(32)) - # print(key) - - ENCRYPTION_KEY: '' - - # Cross-service keys - # Private key for signing requests sent to other Gen3 services - PRIVATE_KEY_PATH: '/var/www/amanuensis/jwt_private_key.pem' - - # ////////////////////////////////////////////////////////////////////////////////////// - # DEBUG & SECURITY SETTINGS - # - Modify based on whether you're in a dev environment or in production - # ////////////////////////////////////////////////////////////////////////////////////// - # flask's debug setting - # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) - DEBUG: false - # if true, will automatically login a user with username "test" - # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) - MOCK_AUTH: false - # if true, will fake a successful login response from Google in /login/google - # NOTE: this will also modify the behavior of /link/google endpoints - # WARNING: DO NOT ENABLE IN PRODUCTION (for testing purposes only) - # will login as the username set in cookie DEV_LOGIN_COOKIE_NAME - MOCK_GOOGLE_AUTH: false - # if true, will ignore anything configured in STORAGE_CREDENTIALS - MOCK_STORAGE: true - # allow OIDC traffic on http for development. By default it requires https. - # - # WARNING: ONLY set to true when amanuensis will be deployed in such a way that it will - # ONLY receive traffic from internal clients and can safely use HTTP. - AUTHLIB_INSECURE_TRANSPORT: false - - # set if you want browsers to only send cookies with requests over HTTPS - SESSION_COOKIE_SECURE: true - - ENABLE_CSRF_PROTECTION: true - - OIDC_ISSUER: 'https://{{HOSTNAME}}/user' - - OAUTH2: - client_id: 'oauth2_client_id' - client_secret: 'oauth2_client_secret' - api_base_url: 'https://{{HOSTNAME}}/user/' - authorize_url: 'https://{{HOSTNAME}}/user/oauth2/authorize' - access_token_url: 'https://{{HOSTNAME}}/user/oauth2/token' - refresh_token_url: 'https://{{HOSTNAME}}/user/oauth2/token' - client_kwargs: - # redirect_uri: 'https://{{HOSTNAME}}/api/v0/oauth2/authorize' - redirect_uri: 'https://{{HOSTNAME}}/amanuensis/oauth2/authorize' - scope: 'openid data user' - # deprecated key values, should be removed after all commons use new oidc - internal_oauth_provider: 'http://fence-service/oauth2/' - oauth_provider: 'https://{{HOSTNAME}}/user/oauth2/' - # redirect_uri: 'https://{{HOSTNAME}}/api/v0/oauth2/authorize' - redirect_uri: 'https://{{HOSTNAME}}/amanuensis/oauth2/authorize' - - USER_API: 'http://fence-service/' - # option to force authutils to prioritize USER_API setting over the issuer from - # token when redirecting, used during local docker compose setup when the - # services are on different containers but the hostname is still localhost - FORCE_ISSUER: true - - # amanuensis (at the moment) attempts a migration on startup. 
setting this to false will disable that - # WARNING: ONLY set to false if you do NOT want to automatically migrate your database. - # You should be careful about incompatible versions of your db schema with what - # amanuensis expects. In other words, things could be broken if you update to a later - # amanuensis that expects a schema your database isn't migrated to. - # NOTE: We are working to improve the migration process in the near future - ENABLE_DB_MIGRATION: true - - - # ////////////////////////////////////////////////////////////////////////////////////// - # LIBRARY CONFIGURATION (flask) - # - Already contains reasonable defaults - # ////////////////////////////////////////////////////////////////////////////////////// - - # used for flask, "path mounted under by the application / web server" - # since we deploy as microservices, fence is typically under {{base}}/user - # this is also why our BASE_URL default ends in /user - APPLICATION_ROOT: '/amanuensis' - - - - ######################################################################################## - # OPTIONAL CONFIGURATIONS # - ######################################################################################## - - # ////////////////////////////////////////////////////////////////////////////////////// - # SUPPORT INFO - # ////////////////////////////////////////////////////////////////////////////////////// - # If you want an email address to show up when an unhandled error occurs, provide one - # here. Something like: support@example.com - SUPPORT_EMAIL_FOR_ERRORS: null - - - # ////////////////////////////////////////////////////////////////////////////////////// - # AWS BUCKETS AND CREDENTIALS - # - Support `/data` endpoints - # ////////////////////////////////////////////////////////////////////////////////////// - AWS_CREDENTIALS: - 'DATA_DELIVERY_S3_BUCKET': - aws_access_key_id: 'DATA_DELIVERY_S3_BUCKET_ACCESS_KEY' - aws_secret_access_key: 'DATA_DELIVERY_S3_BUCKET_PRIVATE_KEY' - # NOTE: Remove the {} and supply creds if needed. Example in comments below - # 'CRED1': - # aws_access_key_id: '' - # aws_secret_access_key: '' - # 'CRED2': - # aws_access_key_id: '' - # aws_secret_access_key: '' - - # NOTE: the region is optonal for s3_buckets, however it should be specified to avoid a - # call to GetBucketLocation which you make lack the AWS ACLs for. - # public buckets do not need the region field. - S3_BUCKETS: {} - # NOTE: Remove the {} and supply buckets if needed. Example in comments below - # bucket1: - # cred: 'CRED1' - # region: 'us-east-1' - # # optionally you can manually specify an s3-compliant endpoint for this bucket - # endpoint_url: 'https://cleversafe.example.com/' - # bucket2: - # cred: 'CRED2' - # region: 'us-east-1' - # bucket3: - # cred: '*' # public bucket - # bucket4: - # cred: 'CRED1' - # region: 'us-east-1' - # role-arn: 'arn:aws:iam::role1' - - # `DATA_DOWNLOAD_BUCKET` specifies an S3 bucket to which data files are uploaded by the system/admin user, - # User Data request files are stored here. 
- DATA_DOWNLOAD_BUCKET: 'bucket1' - - # ////////////////////////////////////////////////////////////////////////////////////// - # PROXY - # - Optional: If the api is behind firewall that needs to set http proxy - # ////////////////////////////////////////////////////////////////////////////////////// - # NOTE: leave as-is to not use proxy - # this is only used by the Google Oauth2Client at the moment if provided - HTTP_PROXY: - host: null - port: 3128 - - # ////////////////////////////////////////////////////////////////////////////////////// - # MICROSERVICE PATHS - # - Support `/data` endpoints & authz functionality - # ////////////////////////////////////////////////////////////////////////////////////// - # url where indexd microservice is running (for signed urls primarily) - # NOTE: Leaving as null will force fence to default to {{BASE_URL}}/index - # example value: 'https://example.com/index' - INDEXD: http://indexd-service - - # this is the username which fence uses to make authenticated requests to indexd - INDEXD_USERNAME: 'indexd_client' - # this is the password which fence uses to make authenticated requests to indexd - INDEXD_PASSWORD: '' - - # url where authz microservice is running - ARBORIST: 'http://arborist-service' - FENCE: 'http://fence-service' - - # ////////////////////////////////////////////////////////////////////////////////////// - # EMAIL - # - Support for sending hubspot API work updates to project management team - # ////////////////////////////////////////////////////////////////////////////////////// - # Simple Email Service (for sending emails from fence) - # - # NOTE: Example in comments below +AWS_CREDENTIALS: AWS_SES: SENDER: "" RECIPIENT: "" AWS_REGION: "us-east-1" - AWS_ACCESS_KEY: "" - AWS_SECRET_KEY: "" - - HUBSPOT: - API_KEY: "DEV_KEY" - - DB_MIGRATION_POSTGRES_LOCK_KEY: 100 - - - # Draft: The user started the form but saves it to complete another day - # Submitted: the user sends the completed form to a PM (email or through the system) - # Review: In the hand of the EC - # Revision: back to the requestor, needs to respond to the EC questions or concerns. loop to review - # Approved with Feedback: It is approved but it needs some changes with for example the filter-set before being approved - # Request Criteria Finalized: Update the filterset or apply the feedback the EC gave - # Approved: the request is approved - # Rejected: the request is rejected - # Withdrawal: The request has been withdrawn - # Agreements Negotiation: Creating the agreements after the request is approved - # Agreements Executed: The agreements is in place - # Data Available: The data is ready to be submitted - # Data Downloaded: When a user actually download the data - # Published: Remove access to the data - CONSORTIUM_STATUS: - DEFAULT: - # In order of precedence. - CODES: - - "DRAFT" - - "SUBMITTED" - - "IN_REVIEW" - - "REVISION" - - "APPROVED_WITH_FEEDBACK" - - "REQUEST_CRITERIA_FINALIZED" - - "APPROVED" - - "REJECTED" - - "WITHDRAWAL" - - "AGREEMENTS_NEGOTIATION" - - "AGREEMENTS_EXECUTED" - - "DATA_AVAILABLE" - - "DATA_DOWNLOADED" - - "PUBLISHED" - FINAL: - - "REJECTED" - - "WITHDRAWAL" - - "DATA_DOWNLOADED" - NOTIFY: - - "DATA_DOWNLOADED" - INSTRUCT: - # In order of precedence. 
- CODES: - - "DRAFT" - - "SUBMITTED" - - "IN_REVIEW" - - "REVISION" - - "APPROVED_WITH_FEEDBACK" - - "REQUEST_CRITERIA_FINALIZED" - - "APPROVED" - - "REJECTED" - - "WITHDRAWAL" - - "AGREEMENTS_NEGOTIATION" - - "AGREEMENTS_EXECUTED" - - "DATA_AVAILABLE" - - "DATA_DOWNLOADED" - - "PUBLISHED" - FINAL: - - "REJECTED" - - "WITHDRAWAL" - - "DATA_DOWNLOADED" - NOTIFY: - - "DATA_DOWNLOADED" - INRG: - # In order of precedence. - CODES: - - "DRAFT" - - "SUBMITTED" - - "IN_REVIEW" - - "REVISION" - - "APPROVED_WITH_FEEDBACK" - - "REQUEST_CRITERIA_FINALIZED" - - "APPROVED" - - "REJECTED" - - "WITHDRAWAL" - - "AGREEMENTS_NEGOTIATION" - - "AGREEMENTS_EXECUTED" - - "DATA_AVAILABLE" - - "DATA_DOWNLOADED" - - "PUBLISHED" - FINAL: - - "REJECTED" - - "WITHDRAWAL" - - "DATA_DOWNLOADED" - NOTIFY: - - "DATA_DOWNLOADED" - # Add consortia \ No newline at end of file + aws_access_key_id: "" + aws_secret_access_key: "" + DATA_DELIVERY_S3_BUCKET: + bucket_name: "" + aws_access_key_id: "" + aws_secret_access_key: "" diff --git a/helm/fence/templates/presigned-url-fence.yaml b/helm/fence/templates/presigned-url-fence.yaml index 534c81e9b..c9c8ec61a 100644 --- a/helm/fence/templates/presigned-url-fence.yaml +++ b/helm/fence/templates/presigned-url-fence.yaml @@ -26,7 +26,7 @@ spec: containers: - name: presigned-url-fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: Always + imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http containerPort: 80 diff --git a/helm/fence/templates/useryaml-job.yaml b/helm/fence/templates/useryaml-job.yaml index 6adb96c4d..4f7e66af5 100644 --- a/helm/fence/templates/useryaml-job.yaml +++ b/helm/fence/templates/useryaml-job.yaml @@ -27,7 +27,7 @@ spec: containers: - name: fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: Always + imagePullPolicy: {{ .Values.image.pullPolicy }} env: {{- toYaml .Values.env | nindent 10 }} volumeMounts: diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index d2b2d9ea1..d1841cc05 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -373,6 +373,13 @@ volumes: configMap: name: "fence-yaml-merge" optional: true + + - name: amanuensis-jwt-keys + secret: + secretName: "amanuensis-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem # -- (list) Volumes to mount to the container. volumeMounts: @@ -420,6 +427,11 @@ volumeMounts: readOnly: true mountPath: "/fence/keys/key/jwt_private_key.pem" subPath: "jwt_private_key.pem" + - name: "amanuensis-jwt-keys" + readOnly: true + mountPath: "/amanuensis/jwt_public_key.pem" + subPath: "jwt_public_key.pem" + # -- (list) Volumes to mount to the init container. 
initVolumeMounts: @@ -1402,6 +1414,8 @@ FENCE_CONFIG: # print(key) ENCRYPTION_KEY: REPLACEME + AMANUENSIS_PUBLIC_KEY_PATH: '/amanuensis/jwt_public_key.pem' + # -- (map) Debug and security settings # Modify based on whether you're in a dev environment or in production DEBUG: false diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 31487a65b..3c5fc10a1 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -70,15 +70,15 @@ fence: DEBUG: true MOCK_STORAGE: true #fill in - AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' + #AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' MOCK_GOOGLE_AUTH: true mock_default_user: 'test@example.com' image: - repository: "fence" - tag: "test" - pullPolicy: Never + repository: "quay.io/pcdc/fence" + tag: "helm-test" + pullPolicy: Always USER_YAML: | authz: @@ -287,7 +287,6 @@ fence: - workspace - services.sheepdog-admin - services.amanuensis-admin - - services.fence-admin - data_admin - analysis - privacy_policy @@ -303,6 +302,7 @@ guppy: authFilterField: "auth_resource_path" manifestservice: + enabled: false image: repository: quay.io/cdis/manifestservice tag: "2025.01" @@ -319,8 +319,9 @@ peregrine: portal: image: - repository: quay.io/pcdc/windmill - tag: 1.34.0 + repository: "windmill" + tag: "test" + pullPolicy: Never resources: requests: cpu: 1.0 @@ -379,4 +380,10 @@ indexd: enabled: false hatchery: + enabled: false + +gearbox: + enabled: false + +gearbox-middleware: enabled: false \ No newline at end of file diff --git a/tools/roll.sh b/tools/roll.sh index 02fd97295..36586b6d4 100755 --- a/tools/roll.sh +++ b/tools/roll.sh @@ -5,8 +5,26 @@ cd "$(dirname "$0")/../helm/gen3" || exit 1 || exit 1 rm ../../values.yaml +# Initialize variables +DISABLE_UPDATE=false + + + project="$1" shift + +# Parse arguments +for arg in "$@"; do + case $arg in + --disable-update) + DISABLE_UPDATE=true + shift # Remove --disable-update from arguments + ;; + *) + # Pass through other arguments + ;; + esac +done # Check if ../../secret-values.yaml exists if [ -f ../../secret-values.yaml ]; then yq '. *= load("../../secret-values.yaml")' ../../$project-default-values.yaml > ../../values.yaml @@ -40,17 +58,26 @@ if [ $# -gt 0 ]; then # Iterate over each argument (service name) for service_name in "$@" do + # Skip if service_name is disable-update + if [ "$service_name" = "--disable-update" ]; then + continue + fi + # Delete the deployment corresponding to the service name kubectl delete deployment ${service_name}-deployment if [ "$service_name" = "gearbox" ]; then kubectl delete job gearbox-g3auto-patch fi - done fi -# Run helm dependency update -helm dependency update +# Conditionally run helm dependency update +if [ "$DISABLE_UPDATE" = false ]; then + echo "Running helm dependency update..." + helm dependency update +else + echo "Skipping helm dependency update..." +fi # Run helm upgrade --install command helm upgrade --install $project . 
-f ../../values.yaml \ No newline at end of file From 9d16a5b79b1d2a793c7d65f8677a3ba091eb5266 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 3 Mar 2025 10:55:27 -0800 Subject: [PATCH 028/126] fix amanuensis-config job and pcdc data load --- .gitignore | 3 +- .../amanuensis-secret/config_helper.py | 6 + .../templates/amanuensis-config-job.yaml | 87 -- .../templates/amanuensis-creds-job.yaml | 92 -- .../templates/amanuensis-db-migrate-job.yaml | 4 +- .../templates/amanuensis-jobs-sa.yaml | 26 - ...nuensis-populate-search-table-cronjob.yaml | 34 + .../templates/amanuensis-secrets.yaml | 186 ++++ helm/amanuensis/values.yaml | 3 + helm/portal/defaults/gitops.json | 884 ++++++++++++------ pcdc-default-values.yaml | 11 +- pcdc_data/generate_data.sh | 12 +- pcdc_data/load_elasticsearch.sh | 18 +- ...{load_gen3_scripts.sh => load_gen3_etl.sh} | 24 +- pcdc_data/load_graph_db.sh | 11 +- pcdc_data/run_all.sh | 3 +- 16 files changed, 869 insertions(+), 535 deletions(-) delete mode 100644 helm/amanuensis/templates/amanuensis-config-job.yaml delete mode 100644 helm/amanuensis/templates/amanuensis-creds-job.yaml delete mode 100644 helm/amanuensis/templates/amanuensis-jobs-sa.yaml create mode 100644 helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml create mode 100644 helm/amanuensis/templates/amanuensis-secrets.yaml rename pcdc_data/{load_gen3_scripts.sh => load_gen3_etl.sh} (50%) diff --git a/.gitignore b/.gitignore index cba7d7ee5..e371729af 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,5 @@ credentials.json CA/ temp.yaml /values.yaml -gen3_scripts/ \ No newline at end of file +gen3_scripts/ +gen3_etl/ \ No newline at end of file diff --git a/helm/amanuensis/amanuensis-secret/config_helper.py b/helm/amanuensis/amanuensis-secret/config_helper.py index 132de4b62..869ca25af 100644 --- a/helm/amanuensis/amanuensis-secret/config_helper.py +++ b/helm/amanuensis/amanuensis-secret/config_helper.py @@ -111,6 +111,7 @@ def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): data_delivery_bucket = _get_nested_value(creds, "data_delivery_bucket") data_delivery_bucket_aws_key_id = _get_nested_value(creds, "data_delivery_bucket_aws_key_id") data_delivery_bucket_aws_access_key = _get_nested_value(creds, "data_delivery_bucket_aws_access_key") + csl_key = _get_nested_value(creds, "csl_key") db_path = "postgresql://{}:{}@{}:5432/{}".format( db_username, db_password, db_host, db_database @@ -142,6 +143,11 @@ def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name", data_delivery_bucket ) + print(" CSL_KEY injected with value(s) from creds.json") + config_file = _replace( + config_file, "CSL_KEY", csl_key + ) + # modify USER_API to http://user-service/ if hostname is localhost if hostname == "localhost": diff --git a/helm/amanuensis/templates/amanuensis-config-job.yaml b/helm/amanuensis/templates/amanuensis-config-job.yaml deleted file mode 100644 index 9a4cb9984..000000000 --- a/helm/amanuensis/templates/amanuensis-config-job.yaml +++ /dev/null @@ -1,87 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: amanuensis-config - labels: - redeploy-hash: "{{ .Release.Revision }}" -spec: - backoffLimit: 0 - template: - metadata: - labels: - app: gen3job - spec: - serviceAccountName: "amanuensis-jobs" - volumes: - - name: shared-data - emptyDir: {} - - name: creds-volume - secret: - secretName: "amanuensis-creds" - - name: config-helper - secret: - secretName: "amanuensis-secret" 
- containers: - - name: amanuensis - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: Never - volumeMounts: - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/amanuensis/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/amanuensis/config_helper.py" - subPath: config_helper.py - - name: shared-data - mountPath: /mnt/shared - command: ["/bin/bash"] - args: - - "-c" - # Script always succeeds if it runs (echo exits with 0) - - | - echo "generating default amanuensis configuration..." - python /amanuensis/cfg_help.py create --config_path new-amanuensis-config.yaml - - if [[ -f /var/www/amanuensis/creds.json ]]; then - echo "" - echo "injecting creds.json into amanuensis configuration..." - python /var/www/amanuensis/config_helper.py -i /var/www/amanuensis/creds.json -c new-amanuensis-config.yaml - else - echo "ERROR: /var/www/amanuensis/creds.json not found!" - echo " Only generating default config..." - fi - - cp new-amanuensis-config.yaml /mnt/shared/new-amanuensis-config.yaml - - - name: awshelper - image: "quay.io/cdis/awshelper:master" - imagePullPolicy: Always - volumeMounts: - - name: shared-data - mountPath: /mnt/shared - command: ["/bin/bash"] - args: - - "-c" - - | - # wait for other image to generate config - let count=0 - while [[ ! -f /mnt/shared/new-amanuensis-config.yaml && $count -lt 50 ]]; do - echo "waiting for /mnt/shared/new-amanuensis-config.yaml" - sleep 2 - let count=$count+1 - done - - if [[ -f /mnt/shared/new-amanuensis-config.yaml ]]; then - # load yaml file into secrets - if kubectl get secrets/amanuensis-config > /dev/null 2>&1; then - kubectl delete secret amanuensis-config - fi - echo "saving amanuensis configuration into amanuensis-config secret..." 
- kubectl create secret generic amanuensis-config "--from-file=amanuensis-config.yaml=/mnt/shared/new-amanuensis-config.yaml" - else - echo "/mnt/shared/new-amanuensis-config.yaml did not appear within timeout :-(" - fi - - restartPolicy: Never diff --git a/helm/amanuensis/templates/amanuensis-creds-job.yaml b/helm/amanuensis/templates/amanuensis-creds-job.yaml deleted file mode 100644 index b7b146d6a..000000000 --- a/helm/amanuensis/templates/amanuensis-creds-job.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: amanuensis-creds - labels: - redeploy-hash: "{{ .Release.Revision }}" -spec: - backoffLimit: 0 - template: - metadata: - labels: - app: gen3job - spec: - serviceAccountName: "amanuensis-jobs" - volumes: - - name: shared-data - emptyDir: {} - containers: - - name: awshelper - image: "quay.io/cdis/awshelper:master" - imagePullPolicy: Always - env: - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: password - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: username - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: database - optional: false - - name: PGHOST - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: host - optional: false - - name: PGPORT - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: port - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: dbcreated - optional: false - - name: HOSTNAME - value: "{{ .Values.global.hostname }}" - - name: DATA_DOWNLOAD_BUCKET - value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.bucket_name }}" - - name: AWS_ACCESS_KEY_ID - value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.aws_access_key_id }}" - - name: AWS_SECRET_ACCESS_KEY - value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.aws_secret_access_key }}" - volumeMounts: - - name: shared-data - mountPath: /mnt/shared - command: ["/bin/bash"] - args: - - "-c" - - | - if kubectl get secret amanuensis-creds; then - kubectl delete secret amanuensis-creds - fi - cat < /mnt/shared/creds.json - { - "db_host": "${PGHOST}", - "db_username": "${PGUSER}", - "db_password": "${PGPASSWORD}", - "db_database": "${PGDB}", - "hostname": "${HOSTNAME}", - "indexd_password": "", - "data_delivery_bucket": "${DATA_DOWNLOAD_BUCKET}", - "data_delivery_bucket_aws_key_id": "${AWS_ACCESS_KEY_ID}", - "data_delivery_bucket_aws_access_key": "${AWS_SECRET_ACCESS_KEY}" - } - EOF - - kubectl create secret generic amanuensis-creds --from-file=/mnt/shared/creds.json - restartPolicy: Never diff --git a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml index 8894b54c6..584cb509a 100644 --- a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml +++ b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml @@ -49,8 +49,6 @@ spec: readOnly: true mountPath: "/var/www/amanuensis/creds.json" subPath: creds.json - - mountPath: /tmp/pod - name: tmp-pod command: ["/bin/bash"] args: - "-c" @@ -61,6 +59,6 @@ spec: fence-create migrate if [[ $? != 0 ]]; then echo "WARNING: non zero exit code: $?" 
+            exit 1
            fi
-        touch /tmp/pod/completed
       restartPolicy: OnFailure
\ No newline at end of file
diff --git a/helm/amanuensis/templates/amanuensis-jobs-sa.yaml b/helm/amanuensis/templates/amanuensis-jobs-sa.yaml
deleted file mode 100644
index 7d32e3b4b..000000000
--- a/helm/amanuensis/templates/amanuensis-jobs-sa.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: amanuensis-jobs
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: amanuensis-jobs-role
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list", "watch", "create", "update", "delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: amanuensis-jobs-role-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: amanuensis-jobs-role
-subjects:
-  - kind: ServiceAccount
-    name: amanuensis-jobs
-    namespace: default
\ No newline at end of file
diff --git a/helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml b/helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml
new file mode 100644
index 000000000..ed293238b
--- /dev/null
+++ b/helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml
@@ -0,0 +1,34 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: amanuensis-db-filter-set-filler
+  labels:
+    redeploy-hash: "{{ .Release.Revision }}"
+spec:
+  schedule: "0 0 1 * *"
+  concurrencyPolicy: Forbid
+  # This is the default, but do we want to keep a long-term record of deleted filter sets?
+  # The original filter sets will not be deleted unless manually done by the user.
+  successfulJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          automountServiceAccountToken: false
+          volumes:
+            - name: config-volume
+              secret:
+                secretName: "amanuensis-config"
+          containers:
+            - name: amanuensis-db-filter-set-filler
+              image: "amanuensis-db-filter-set-filler:test"
+              imagePullPolicy: Never
+              volumeMounts:
+                - name: "config-volume"
+                  readOnly: true
+                  mountPath: "/var/www/amanuensis/amanuensis-config.yaml"
+                  subPath: amanuensis-config.yaml
+          restartPolicy: Never
\ No newline at end of file
diff --git a/helm/amanuensis/templates/amanuensis-secrets.yaml b/helm/amanuensis/templates/amanuensis-secrets.yaml
new file mode 100644
index 000000000..9d3d39f3b
--- /dev/null
+++ b/helm/amanuensis/templates/amanuensis-secrets.yaml
@@ -0,0 +1,186 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: amanuensis-jobs
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: amanuensis-jobs-role
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: amanuensis-jobs-role-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: amanuensis-jobs-role
+subjects:
+  - kind: ServiceAccount
+    name: amanuensis-jobs
+    namespace: default
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: amanuensis-config
+data:
+  amanuensis-config.yaml: ""
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: amanuensis-creds
+data:
+  creds.json: ""
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: amanuensis-secrets-{{ .Release.Revision }}
+  annotations:
+  labels:
+    redeploy-hash: "{{ .Release.Revision }}"
+spec:
+  backoffLimit: 0
+  template:
+    metadata:
+      labels:
+        app: gen3job
+    spec:
+      serviceAccountName: "amanuensis-jobs"
+      volumes:
+        - name: shared-data
emptyDir: {} + - name: config-helper + secret: + secretName: "amanuensis-secret" + initContainers: + - name: amanuensis-creds + image: "quay.io/cdis/awshelper:master" + imagePullPolicy: Always + env: + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: password + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: username + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: database + optional: false + - name: PGHOST + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: host + optional: false + - name: PGPORT + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: port + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: amanuensis-dbcreds + key: dbcreated + optional: false + - name: HOSTNAME + value: "{{ .Values.global.hostname }}" + - name: DATA_DOWNLOAD_BUCKET + value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.bucket_name }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.aws_access_key_id }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ .Values.AWS_CREDENTIALS.DATA_DELIVERY_S3_BUCKET.aws_secret_access_key }}" + - name: CSL_KEY + value: "{{ .Values.CSL_KEY }}" + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + cat < /mnt/shared/creds.json + { + "db_host": "${PGHOST}", + "db_username": "${PGUSER}", + "db_password": "${PGPASSWORD}", + "db_database": "${PGDB}", + "hostname": "${HOSTNAME}", + "indexd_password": "", + "data_delivery_bucket": "${DATA_DOWNLOAD_BUCKET}", + "data_delivery_bucket_aws_key_id": "${AWS_ACCESS_KEY_ID}", + "data_delivery_bucket_aws_access_key": "${AWS_SECRET_ACCESS_KEY}", + "csl_key": "${CSL_KEY}" + } + EOF + + kubectl patch secret amanuensis-creds --type='json' -p='[{"op": "replace", "path": "/data/creds.json", "value": "'$(cat /mnt/shared/creds.json | base64 -w 0)'"}]' + + - name: create-amanuensis-config + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: Never + volumeMounts: + - name: "config-helper" + readOnly: true + mountPath: "/var/www/amanuensis/config_helper.py" + subPath: config_helper.py + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + # Script always succeeds if it runs (echo exits with 0) + - | + echo "generating default amanuensis configuration..." + python /amanuensis/cfg_help.py create --config_path new-amanuensis-config.yaml + + if [[ -f /mnt/shared/creds.json ]]; then + echo "" + echo "injecting creds.json into amanuensis configuration..." + if ! python /var/www/amanuensis/config_helper.py -i /mnt/shared/creds.json -c new-amanuensis-config.yaml; then + echo "ERROR: Failed to inject creds.json into amanuensis configuration!" + exit 1 + fi else + echo "ERROR: /mnt/shared/creds.json not found!" + echo " Only generating default config..." + fi + + cp new-amanuensis-config.yaml /mnt/shared/new-amanuensis-config.yaml + + containers: + - name: create-amanuensis-config-secret + image: "quay.io/cdis/awshelper:master" + imagePullPolicy: Always + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + if [[ -f /mnt/shared/new-amanuensis-config.yaml ]]; then + # load yaml file into secrets + echo "saving amanuensis configuration into amanuensis-config secret..." 
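            # The JSON patch below replaces the secret's single data key with the freshly
            # generated config, base64-encoded as the Kubernetes Secret API expects. A
            # commonly used alternative sketch (not what this chart does) is to regenerate
            # the secret idempotently:
            #   kubectl create secret generic amanuensis-config \
            #     --from-file=amanuensis-config.yaml=/mnt/shared/new-amanuensis-config.yaml \
            #     --dry-run=client -o yaml | kubectl apply -f -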
+ kubectl patch secret amanuensis-config --type='json' -p='[{"op": "replace", "path": "/data/amanuensis-config.yaml", "value": "'$(cat /mnt/shared/new-amanuensis-config.yaml | base64 -w 0)'"}]' + fi + + restartPolicy: Never diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index ea123ac1e..08b4bcb2c 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -257,3 +257,6 @@ AWS_CREDENTIALS: bucket_name: "" aws_access_key_id: "" aws_secret_access_key: "" + + +CSL_KEY: "" \ No newline at end of file diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index 0f3ee0f0a..73fa42f9c 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -1,352 +1,666 @@ - { - "subcommons": [ - { - "URL": "https://tb.diseasedatahub.org/", - "name": "TB" - }, - { - "URL": "https://aids.diseasedatahub.org/", - "name": "AIDS" - }, - { - "URL": "https://flu.diseasedatahub.org/", - "name": "FLU" - }, - { - "URL": "https://microbiome.diseasedatahub.org/", - "name": "Microbiome" - } - ], - "gaTrackingId": "UA-119127212-1", + "gaTrackingId": "undefined", "graphql": { "boardCounts": [ + { + "graphql": "_person_count", + "name": "Person", + "plural": "Persons" + }, { "graphql": "_subject_count", "name": "Subject", "plural": "Subjects" - }, - { - "graphql": "_study_count", - "name": "Study", - "plural": "Studies" - }, - { - "graphql": "_summary_lab_result_count", - "name": "Lab record", - "plural": "Lab records" } ], "chartCounts": [ { - "graphql": "_subject_count", - "name": "Subject" + "graphql": "_person_count", + "name": "Person" }, { - "graphql": "_study_count", - "name": "Study" + "graphql": "_subject_count", + "name": "Subject" } ], "projectDetails": "boardCounts" }, "components": { - "appName": "Gen3 Disease Data Hub", + "appName": "Pediatric Cancer Data Commons Portal", "index": { "introduction": { - "heading": "Gen3 Disease Data Hub Datasets", - "text": "The Gen3 Disease Data Hub hosts data related to infectious diseases and aims to make data findable, accessible, interoperable, and reusable (FAIR).", - "link": "/datasets" + "heading": "Pediatric Cancer Data Commons", + "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", + "link": "/submission" }, "buttons": [ { - "name": "TB Environment", - "icon": "data-explore", - "body": "Explore TB data.", - "external_link": "https://tb.diseasedatahub.org" - }, - { - "name": "AIDS Environment", - "icon": "data-explore", - "body": "Explore AIDS data.", - "external_link": "https://aids.diseasedatahub.org" - }, - { - "name": "Flu Environment", - "icon": "data-explore", - "body": "Explore influenza data.", - "external_link": "https://flu.diseasedatahub.org" + "name": "Define Data Field", + "icon": "data-field-define", + "body": "The Pediatric Cancer Data Commons defines the data. 
Please study the dictionary before you start browsing.", + "link": "/DD", + "label": "Learn more" }, { - "name": "Microbiome Environment", + "name": "Explore Data", "icon": "data-explore", - "body": "Explore data from a collection of open-access microbiome-related studies.", - "external_link": "https://microbiome.diseasedatahub.org" + "body": "The Exploration Page gives you insights and a clear overview under selected factors.", + "link": "/explorer", + "label": "Explore data" } - ] + ], + "barChart": { + "showPercentage": true + } }, "navigation": { "items": [ { - "icon": "query", - "link": "/datasets", + "icon": "dictionary", + "link": "/DD", "color": "#a2a2a2", - "name": "Dataset Browser" + "name": "Dictionary" }, { "icon": "exploration", "link": "/explorer", "color": "#a2a2a2", - "name": "Eco Explorer" + "name": "Exploration" } ] }, "topBar": { "items": [ { - "link": "https://gen3.org/resources/user/", - "name": "Documentation" + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/pcdc/", + "name": "About PCDC" + }, + { + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/sponsors/", + "name": "Our Sponsors" } ] }, "login": { - "title": "Gen3 Disease Data Hub", - "subTitle": "Cross Environment Datasets", - "text": "The website combines open access datasets from multiple disciplines to create clean, easy to navigate visualizations for data-driven discovery within the fields of allergy and infectious diseases.", + "title": "Pediatric Cancer Data Commons", + "subTitle": "Connect. Share. Cure.", + "text": "Welcome to the Pediatric Cancer Data Commons (PCDC), brought to you by Data for the Common Good (D4CG). Headquartered at the University of Chicago, D4CG works with international leaders to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources.\n\nThe PCDC harnesses pediatric, AYA, and adult cancer clinical data from around the world into a single unified platform, making it possible to explore and access data across multiple types of cancer. 
The PCDC Data Portal currently includes some of the world's largest sets of clinical data for pediatric neuroblastoma, soft tissue sarcoma, germ cell tumors, AML, and Hodgkin lymphoma, with the addition of more cancer types in progress.", "contact": "If you have any questions about access or the registration process, please contact ", - "email": "support@datacommons.io" + "email": "pcdc_help@lists.uchicago.edu" }, "footerLogos": [ { - "src": "/custom/sponsors/gitops-sponsors/gen3.png", + "src": "/src/img/gen3.png", "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons" + "alt": "Gen3 Data Commons", + "height": 40 }, { - "src": "/src/img/createdby.png", - "href": "https://ctds.uchicago.edu/", - "alt": "Center for Translational Data Science at the University of Chicago" + "src": "/src/img/uchicago.png", + "href": "https://www.uchicago.edu/", + "alt": "The University of Chicago", + "height": 40 } ] }, - "requiredCerts": [], - "featureFlags": { - "explorer": true, - "analysis": true - }, - "datasetBrowserConfig": { - "filterSections": [ - { - "title": "Supported Data Resources", - "options": [ - { "text": "TB", "filterType": "singleSelect"}, - { "text": "AIDS", "filterType": "singleSelect"}, - { "text": "Flu", "filterType": "singleSelect"}, - { "text": "Microbiome", "filterType": "singleSelect"} - ] + "explorerConfig": [ + { + "id": 1, + "label": "data", + "charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + }, + "consortium": { + "chartType": "bar", + "title": "Consortium" + } }, - { - "title": "Research Focus", - "options": [ - { "text": "AIDS", "filterType": "singleSelect"}, - { "text": "TB", "filterType": "singleSelect"}, - { "text": "Flu", "filterType": "singleSelect"}, - { "text": "Immune Response", "filterType": "singleSelect"}, - { "text": "Immune Phenotype", "filterType": "singleSelect"}, - { "text": "Allergy", "filterType": "singleSelect"}, - { "text": "Atopy", "filterType": "singleSelect"}, - { "text": "Infection Response", "filterType": "singleSelect"}, - { "text": "Vaccine Response", "filterType": "singleSelect"}, - { "text": "Transplantation", "filterType": "singleSelect"}, - { "text": "Oncology", "filterType": "singleSelect"}, - { "text": "Autoimmune", "filterType": "singleSelect"}, - { "text": "Preterm Birth", "filterType": "singleSelect"} + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "data_contributor_id", + "studies.study_id", + "studies.treatment_arm", + "sex", + "race", + "ethnicity", + "year_at_disease_phase", + "survival_characteristics.lkss_obfuscated", + "censor_status", + "age_at_censor_status", + "medical_histories.medical_history", + "medical_histories.medical_history_status", + "external_references.external_resource_name", + "biospecimen_status" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "histologies.histology_grade", + "histologies.histology_inpc", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.tumor_state", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.depth", + "tumor_assessments.tumor_size", + "tumor_assessments.invasiveness", + 
"tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "tumor_assessments.necrosis", + "tumor_assessments.necrosis_pct", + "tumor_assessments.tumor_laterality", + "stagings.irs_group", + "stagings.tnm_finding", + "stagings.stage_system", + "stagings.stage", + "stagings.AB", + "stagings.E", + "stagings.S", + "disease_characteristics.mki", + "disease_characteristics.bulk_disease", + "disease_characteristics.BULK_MED_MASS", + "disease_characteristics.bulky_nodal_aggregate", + "disease_characteristics.who_aml", + "disease_characteristics.CNS_disease_status", + "disease_characteristics.MLDS" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2", + "molecular_analysis.dna_index", + "molecular_analysis.age_at_molecular_analysis", + "molecular_analysis.mitoses", + "molecular_analysis.cytodifferentiation" + ] + }, + { + "title": "Surgery", + "fields": [ + "biopsy_surgical_procedures.tumor_classification", + "biopsy_surgical_procedures.procedure_type", + "biopsy_surgical_procedures.margins" + ] + }, + { + "title": "Radiation", + "fields": [ + "radiation_therapies.tumor_classification", + "radiation_therapies.energy_type", + "radiation_therapies.rt_dose" + ] + }, + { + "title": "Response", + "fields": [ + "subject_responses.tx_prior_response", + "subject_responses.response", + "subject_responses.interim_response", + "subject_responses.response_method", + "minimal_residual_diseases.mrd_result", + "minimal_residual_diseases.mrd_result_numeric" + ] + }, + { + "title": "SMN", + "fields": [ + "secondary_malignant_neoplasm.age_at_smn", + "secondary_malignant_neoplasm.smn_site", + "secondary_malignant_neoplasm.smn_type", + "secondary_malignant_neoplasm.smn_yn", + "secondary_malignant_neoplasm.smn_morph_icdo" + ] + }, + { + "title": "Imaging", + "fields": [ + "imagings.imaging_method", + "imagings.imaging_result" + ] + }, + { + "title": "Labs", + "fields": [ + "labs.lab_test", + "labs.lab_result", + "labs.lab_result_numeric", + "labs.lab_result_unit" + ] + }, + { + "title": "SCT", + "fields": [ + "stem_cell_transplants.sct_type", + "stem_cell_transplants.sct_source", + "stem_cell_transplants.sct_donor_relationship" + ] + } ] - } - ], - "fieldMapping" : [ - { "field": "link", "name": "View" }, - { "field": "dataset_name", "name": "Study" }, - { "field": "supported_data_resource", "name": "Supported Data Resource" }, - { "field": "research_focus", "name": "Research Focus" }, - { "field": "description", "name": "Description of Dataset" } - ], - "filterConfig": { - "tabs": [{ - "title": "Filters", - "fields": ["supported_data_resource", "research_focus"] - }] - } - }, - "dataExplorerConfig": { - "charts": { - "project_id": { - "chartType": "count", - "title": "Projects" - }, - "subject_id": { - "chartType": "count", - "title": "Subjects" - }, - "dataset": { - "chartType": "pie", - "title": "Resources", - "chartRow": 0 - }, - "data_format": { - "chartType": "bar", - "title": "Data Format", - "chartRow": 0 - }, - "data_type": { - "chartType": "pie", - "title": "Data Type", - "chartRow": 0 - }, - "experimental_strategies": { - "chartType": "bar", - "title": "Experimental Strategies", - "chartRow": 0 - }, - "species": { - "chartType": "bar", - "title": "Genus species", - "chartRow": 0 - }, - "gender": { - "chartType": 
"pie", - "title": "Gender", - "chartRow": 1 - }, - "race": { - "chartType": "pie", - "title": "Race", - "chartRow": 1 - }, - "ethnicity": { - "chartType": "pie", - "title": "Ethnicity", - "chartRow": 1 - }, - "biospecimen_anatomic_site": { - "chartType": "pie", - "title": "Biospecimen Anatomic Site", - "chartRow": 1 - } - }, - "fieldMapping" : [ - { "field": "dataset", "name": "Resource" }, - { "field": "studyAccession", "name": "Study" }, - { "field": "phenotype", "name": "Phenotype" }, - { "field": "gender", "name": "Gender" }, - { "field": "ethnicity", "name": "Ethnicity" }, - { "field": "strain", "name": "Strain" }, - { "field": "species", "name": "Genus species" }, - { "field": "submitter_id", "name": "Submitter ID" }, - { "field": "race", "name": "Race" }, - { "field": "hiv_status", "name": "HIV Status" }, - { "field": "study_submitter_id", "name": "Study"}, - { "field": "frstdthd", "name": "Year of Death" }, - { "field": "arthxbase", "name": "ART Use Prior to Baseline"}, - { "field": "bshbvstat", "name": "Baseline HBV Sero-status"}, - { "field": "bshcvstat", "name": "Baseline HCV Sero-status"}, - { "field": "cd4nadir", "name": "CD4 Nadir Prior to HAART"}, - { "field": "status", "name": "Summarized HIV Sero-status"}, - {"field": "project_id", "name": "Project ID"}, - {"field": "frstcncrd", "name": "First Confirmed Cancer Year"}, - {"field": "frstdmd", "name": "First Visit Year with Diabetes"}, - {"field": "frstdmmd", "name": "First Visit Year with All Necessary Components to Determine Diabetes"}, - {"field": "frsthtnd", "name": "First Visit Year with Hypertension"}, - {"field": "frsthtnmd", "name": "First Visit Year with All Necessary Components to Determine Hypertension"}, - {"field": "fcd4lowd", "name": "First Year Seen CD4N < 200 or CD4% < 14"}, - {"field": "fposdate", "name": "First Year Seen Seropositive"}, - {"field": "frstaidd", "name": "First Reported AIDS Year"}, - {"field": "lastafrd", "name": "Last Reported AIDS Free Year"}, - {"field": "lastcond", "name": "Year of Last Study Visit Attended"}, - {"field": "lastcontact", "name": "Last Year of Contact"}, - {"field": "lcd4higd", "name": "Last Year Seen with CD4N >= 200 and CD4% >= 14"}, - {"field": "lnegdate", "name": "Last Year Seen Seronegative"}, - {"field": "amikacin_res_phenotype", "name": "Amikacin Phenotype" }, - {"field": "capreomycin_res_phenotype", "name": "Capreomycin Phenotype" }, - {"field": "isoniazid_res_phenotype", "name": "Isoniazid Phenotype" }, - {"field": "kanamycin_res_phenotype", "name": "Kanamycin Phenotype" }, - {"field": "ofloxacin_res_phenotype", "name": "Ofloxacin Phenotype" }, - {"field": "pyrazinamide_res_phenotype", "name": "Pyrazinamide Phenotype" }, - {"field": "rifampicin_res_phenotype", "name": "Rifampicin Phenotype" }, - {"field": "rifampin_res_phenotype", "name": "Rifampin Phenotype" }, - {"field": "streptomycin_res_phenotype", "name": "streptomycin Phenotype" } - ], - "filterConfig": { - "tabs": [{ - "title": "Resource", - "fields": ["dataset", "data_format", "data_type"] - }, - { - "title": "Subject", - "fields": ["ethnicity", "gender", "species", "race"] }, - { - "title": "Diagnosis", + "projectId": "search", + "graphqlField": "subject", + "index": "", + "buttons": [ + { + "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data 
accessible to you" + } + ], + "table": { + "enabled": false, "fields": [ - "arthxbase", - "bshbvstat", - "bshcvstat", - "cd4nadir", - "status", - "hiv_status" + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" ] }, - { - "title": "Comorbidity", - "fields": [ - "frstcncrd", - "frstdmd", - "frstdmmd", - "frsthtnd", - "frsthtnmd" - ] - }, { - "title": "HIV History", - "fields": [ - "cd4nadir", - "fcd4lowd", - "fposdate", - "frstaidd", - "lastafrd", - "lastcond", - "lastcontact", - "lcd4higd", - "lnegdate", - "status" - ] + "patientIds": { + "filter": false, + "export": true }, - { - "title": "Drug Resistance", - "fields": [ - "amikacin_res_phenotype", - "capreomycin_res_phenotype", - "isoniazid_res_phenotype", - "kanamycin_res_phenotype", - "ofloxacin_res_phenotype", - "pyrazinamide_res_phenotype", - "rifampicin_res_phenotype", - "rifampin_res_phenotype", - "streptomycin_res_phenotype" - ] + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": true, + "survival": true + } }, - { - "title": "Experiment", - "fields": [ - "experimental_strategies", - "virus_type", - "virus_subtype", - "analyte_type", - "biospecimen_anatomic_site", - "cell_line", - "sample_type", - "composition", - "strain" + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "data_contributor_id", + "name": "Data Contributor", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.study_id", + "name": "Study Id", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.treatment_arm", + "name": "Treatment Arm", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." 
+ }, + { + "field": "year_at_disease_phase", + "name": "Year at Initial Diagnosis" + }, + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "survival_characteristics.lkss_obfuscated", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "medical_histories.medical_history", + "name": "Medical History" + }, + { + "field": "medical_histories.medical_history_status", + "name": "Medical History Status" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "biospecimen_status", + "name": "Biospecimen" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + { + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": "INPC Classification" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_state", + "name": "Tumor State" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diameter Dimension 1" + }, + { + "field": "tumor_assessments.depth", + "name": "Tumor Depth" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "tumor_assessments.necrosis", + "name": "Necrosis" + }, + { + "field": "tumor_assessments.necrosis_pct", + "name": "Necrosis PCT" + }, + { + "field": "tumor_assessments.tumor_laterality", + "name": "Tumor Laterality" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + }, + { + "field": "stagings.stage_system", + "name": "Stage System" + }, + { + "field": "stagings.stage", + "name": "Stage" + }, + { + "field": "stagings.AB", + "name": "Ann Arbor AB" + }, + { + "field": "stagings.E", + "name": "Ann Arbor E" + }, + { + "field": "stagings.S", + "name": "Ann Arbor S" + }, + { + "field": "disease_characteristics.mki", + "name": "MKI" + }, + { + "field": "disease_characteristics.bulk_disease", + "name": "Bulky Disease" + }, + { + "field": "disease_characteristics.BULK_MED_MASS", + "name": "Bulky Mediastinal Mass" + }, + { + "field": "disease_characteristics.bulky_nodal_aggregate", + "name": "Bulky Nodal Aggregate" + }, + { + "field": "disease_characteristics.who_aml", + "name": "WHO AML" + }, + { + "field": "disease_characteristics.CNS_disease_status", + "name": "CNS Disease Status" + }, + { + "field": "disease_characteristics.MLDS", + "name": "MLDS" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": 
"molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "molecular_analysis.dna_index", + "name": "DNA Index" + }, + { + "field": "molecular_analysis.age_at_molecular_analysis", + "name": "Age at Molecular Analysis" + }, + { + "field": "molecular_analysis.mitoses", + "name": "Mitoses" + }, + { + "field": "molecular_analysis.cytodifferentiation", + "name": "Cytodifferentiation" + }, + { + "field": "biopsy_surgical_procedures.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "biopsy_surgical_procedures.procedure_type", + "name": "Procedure Type" + }, + { + "field": "biopsy_surgical_procedures.procedure_site", + "name": "Procedure Site" + }, + { + "field": "biopsy_surgical_procedures.margins", + "name": "Margins" + }, + { + "field": "radiation_therapies.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "radiation_therapies.age_at_rt_start", + "name": "Age at Radiation Therapy" + }, + { + "field": "radiation_therapies.rt_site", + "name": "Radiation Site" + }, + { + "field": "radiation_therapies.energy_type", + "name": "Energy Type" + }, + { + "field": "radiation_therapies.rt_dose", + "name": "Radiation Dose" + }, + { + "field": "radiation_therapies.rt_unit", + "name": "Radiation Unit" + }, + { + "field": "subject_responses.age_at_response", + "name": "Age at Response" + }, + { + "field": "subject_responses.tx_prior_response", + "name": "Treatment Prior Response" + }, + { + "field": "subject_responses.response", + "name": "Response" + }, + { + "field": "subject_responses.interim_response", + "name": "Interim Response" + }, + { + "field": "subject_responses.response_method", + "name": "Response Method" + }, + { + "field": "minimal_residual_diseases.mrd_result", + "name": "MRD Result" + }, + { + "field": "minimal_residual_diseases.mrd_result_numeric", + "name": "MRD Result Numeric" + }, + { + "field": "subject_responses.necrosis", + "name": "Necrosis" + }, + { + "field": "secondary_malignant_neoplasm.age_at_smn", + "name": "Age at SMN" + }, + { + "field": "secondary_malignant_neoplasm.smn_site", + "name": "SMN Site" + }, + { + "field": "secondary_malignant_neoplasm.smn_type", + "name": "SMN Type" + }, + { + "field": "secondary_malignant_neoplasm.smn_yn", + "name": "Secondary Malignancy" + }, + { + "field": "secondary_malignant_neoplasm.smn_morph_icdo", + "name": "ICD-O Morphology" + }, + { + "field": "imagings.imaging_method", + "name": "Imaging Method" + }, + { + "field": "imagings.imaging_result", + "name": "Imaging Result" + }, + { + "field": "labs.lab_result_numeric", + "name": "Numeric Lab Result" + }, + { + "field": "labs.lab_result_unit", + "name": "Lab Result Unit" + }, + { + "field": "labs.lab_result", + "name": "Lab Result" + }, + { + "field": "labs.lab_test", + "name": "Lab Test" + }, + { + "field": "stem_cell_transplants.sct_type", + "name": "SCT Type" + }, + { + "field": "stem_cell_transplants.sct_source", + "name": "SCT Source" + }, + { + "field": "stem_cell_transplants.sct_donor_relationship", + "name": "SCT Donor Relationship" + } ] - }] + }, + "dataRequests": { + "enabled": false + }, + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" } - } -} + ] +} \ No newline at end of file diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 3c5fc10a1..d76e4dca4 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -2,7 +2,7 @@ global: dev: true hostname: localhost portalApp: pcdc - dictionaryUrl: 
https://pcdc-dev-dictionaries.s3.amazonaws.com/pcdc-schema-dev-20230912.json + dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json authz_entity_name: "subject" tls: cert: | @@ -73,6 +73,7 @@ fence: #AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' MOCK_GOOGLE_AUTH: true mock_default_user: 'test@example.com' + #LOGIN_REDIRECT_WHITELIST: ["https://localhost:9443/", "http://localhost:9443/"] image: @@ -318,10 +319,11 @@ peregrine: tag: "1.3.10" portal: + #enabled: false image: - repository: "windmill" - tag: "test" - pullPolicy: Never + repository: "quay.io/pcdc/windmill" + tag: "1.34.0" + pullPolicy: IfNotPresent resources: requests: cpu: 1.0 @@ -339,6 +341,7 @@ sheepdog: tag: "1.5.10" sower: + enabled: false image: repository: quay.io/cdis/sower tag: "2025.01" diff --git a/pcdc_data/generate_data.sh b/pcdc_data/generate_data.sh index d48f85752..2d21aeba7 100755 --- a/pcdc_data/generate_data.sh +++ b/pcdc_data/generate_data.sh @@ -1,9 +1,7 @@ #!/bin/bash # Define the file path -generate_file="generate.sh" - -cd ./gen3_scripts/gen3_load +generate_file="./gen3_etl/graph/generate.sh" # Check if the file exists if [ ! -f "$generate_file" ]; then @@ -11,13 +9,15 @@ if [ ! -f "$generate_file" ]; then exit 1 fi -chmod +x ./generate.sh +chmod +x "$generate_file" # Use sed to replace the line sed -i '' 's/GEN3_SCRIPTS_REPO_BRANCH="origin\/pcdc_dev"/GEN3_SCRIPTS_REPO_BRANCH="origin\/pyyaml-patch"/' "$generate_file" echo "data-simulator branch changed to pyyaml-patch change when PR is completed" +# Run the generate_file script +cd ./gen3_etl/graph +mkdir ./fake_data ./generate.sh - -cd ../.. \ No newline at end of file +cd ../../ \ No newline at end of file diff --git a/pcdc_data/load_elasticsearch.sh b/pcdc_data/load_elasticsearch.sh index 9a8f56939..ffddacb15 100755 --- a/pcdc_data/load_elasticsearch.sh +++ b/pcdc_data/load_elasticsearch.sh @@ -1,10 +1,10 @@ #!/bin/bash -pcdc clear_elasticsearch +source ../.env -cd ./gen3_scripts/es_etl_patch +pcdc clear_elasticsearch -rm -rf env/ +cd ./gen3_etl/elasticsearch # Check if the 'env' directory exists if [ ! -d "env" ]; then @@ -15,13 +15,10 @@ else echo "Virtual environment 'env' already exists." fi +# Activate the virtual environment source env/bin/activate -pip install pyyaml==5.3.1 -pip install python-dotenv -pip install gen3==4.5.0 - -pip install -r requirements.txt +poetry install curr_dir=$(pwd) auth_file_path="$curr_dir/env/lib/python3.9/site-packages/gen3/auth.py" @@ -48,13 +45,12 @@ sed -i "" -E 's/(requests\..*)\)/\1, verify=False)/' "$submission_file_path" echo "submission file edited successfully." 
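# Note: the `sed -i ""` invocations above use the BSD/macOS in-place form; GNU sed
# takes `sed -i` (or `sed -i.bak`) with no separate empty-suffix argument. A hedged
# portability sketch, assuming `sed --version` succeeds only on GNU sed:
#   if sed --version >/dev/null 2>&1; then SED_INPLACE=(sed -i); else SED_INPLACE=(sed -i ""); fi
#   "${SED_INPLACE[@]}" -E 's/(requests\..*)\)/\1, verify=False)/' "$submission_file_path"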
+mkdir -p files + cd etl python etl.py et -#update the env variable to the mapping -latest_file=$(ls -1 ../files/nested_mapping_*.json | sort -r | head -n 1) -cp $latest_file ../files/nested_mapping.json python etl.py l diff --git a/pcdc_data/load_gen3_scripts.sh b/pcdc_data/load_gen3_etl.sh similarity index 50% rename from pcdc_data/load_gen3_scripts.sh rename to pcdc_data/load_gen3_etl.sh index 9485accc4..adbeeb443 100755 --- a/pcdc_data/load_gen3_scripts.sh +++ b/pcdc_data/load_gen3_etl.sh @@ -1,23 +1,24 @@ -GEN3_SCRIPTS_REPO="https://github.com/chicagopcdc/gen3_scripts.git" -GEN3_SCRIPTS_REPO_BRANCH="origin/gen3-helm" - +GEN3_SCRIPTS_REPO="https://github.com/chicagopcdc/gen3_etl.git" +GEN3_SCRIPTS_REPO_BRANCH="origin/update-requirments" +ENV_FILE="../.env" +CREDENTIALS_FILE="../credentials.json" #------------------------------------------------------ # Clean up #------------------------------------------------------ -rm -rf ./gen3_scripts +rm -rf ./gen3_etl echo "removed old folder" #------------------------------------------------------ # Clone or Update chicagopcdc/data-simulator repo #------------------------------------------------------ -echo "Clone or Update chicagopcdc/gen3-scripts repo from github" +echo "Clone or Update chicagopcdc/gen3_etl repo from github" # Does the repo exist? If not, go get it! -if [ ! -d "./gen3_scripts" ]; then +if [ ! -d "./gen3_etl" ]; then git clone $GEN3_SCRIPTS_REPO - cd ./gen3_scripts + cd ./gen3_etl git checkout -t $GEN3_SCRIPTS_REPO_BRANCH git pull @@ -29,10 +30,5 @@ fi -#load in files to gen3_load -cp ../.env ./gen3_scripts/gen3_load -cp ../credentials.json ./gen3_scripts/gen3_load - -#load in files to es_etl_patch -cp ../.env ./gen3_scripts/es_etl_patch -cp ../credentials.json ./gen3_scripts/es_etl_patch \ No newline at end of file +#load in files to gen3_load and es_etl_patch +cp $ENV_FILE ./gen3_etl/elasticsearch && cp $ENV_FILE ./gen3_etl/graph && cp $CREDENTIALS_FILE ./gen3_etl/elasticsearch && cp $CREDENTIALS_FILE ./gen3_etl/graph diff --git a/pcdc_data/load_graph_db.sh b/pcdc_data/load_graph_db.sh index 8b3f7663f..3e3bb58a4 100755 --- a/pcdc_data/load_graph_db.sh +++ b/pcdc_data/load_graph_db.sh @@ -1,8 +1,8 @@ #!/bin/bash -cd ./gen3_scripts/gen3_load +source ../.env -rm -rf env/ +cd ./gen3_etl/graph # Check if the 'env' directory exists if [ ! -d "env" ]; then @@ -13,9 +13,10 @@ else echo "Virtual environment 'env' already exists." fi +# Activate the virtual environment source env/bin/activate -pip install -r requirements.txt +poetry install curr_dir=$(pwd) auth_file_path="$curr_dir/env/lib/python3.9/site-packages/gen3/auth.py" @@ -45,4 +46,6 @@ echo "submission file edited successfully." cd ./operations -python etl.py load \ No newline at end of file +python etl.py load + +deactivate \ No newline at end of file diff --git a/pcdc_data/run_all.sh b/pcdc_data/run_all.sh index db3dbd959..c9ebeb233 100755 --- a/pcdc_data/run_all.sh +++ b/pcdc_data/run_all.sh @@ -18,7 +18,6 @@ if [ ! 
-f "$ENV_FILE" ]; then echo "LOCAL_FILE_PATH='../fake_data/data-simulator'" >> "$ENV_FILE" echo "FILE_TYPE='json'" >> "$ENV_FILE" echo "TYPES=[\"program\", \"adverse_event\", \"biopsy_surgical_procedure\", \"biospecimen\", \"cellular_immunotherapy\", \"concomitant_medication\", \"core_metadata_collection\", \"cytology\", \"disease_characteristic\", \"external_reference\", \"family_medical_history\", \"function_test\", \"growing_teratoma_syndrome\", \"histology\", \"imaging\", \"immunohistochemistry\", \"lab\", \"late_effect\", \"lesion_characteristic\", \"medical_history\", \"minimal_residual_disease\", \"molecular_analysis\", \"myeloid_sarcoma_involvement\", \"non_protocol_therapy\", \"off_protocol_therapy_study\", \"patient_reported_outcomes_metadata\", \"person\", \"project\", \"protocol_treatment_modification\", \"radiation_therapy\", \"secondary_malignant_neoplasm\", \"staging\", \"stem_cell_transplant\", \"study\", \"subject\", \"subject_response\", \"survival_characteristic\", \"timing\", \"total_dose\", \"transfusion_medicine_procedure\", \"tumor_assessment\", \"vital\"]" >> "$ENV_FILE" - echo "REQUESTS_CA_BUNDLE='/Users/pmurdoch/Documents/PCDC/gen3-helm/CA/ca.pem'" >> "$ENV_FILE" echo "PROJECT_LIST=[\"pcdc-$DATE\"]" >> "$ENV_FILE" echo "CREDENTIALS='../credentials.json'" >> "$ENV_FILE" echo "LOCAL_ES_FILE_PATH='../files/pcdc_data.json'" >> "$ENV_FILE" @@ -28,7 +27,7 @@ fi chmod +x "$(dirname "$0")"/*.sh -./load_gen3_scripts.sh +./load_gen3_etl.sh ./generate_data.sh From c38bb5790a85a2e5f5c84daa77255bcf1b7e37b7 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 21 Mar 2025 13:44:04 -0700 Subject: [PATCH 029/126] fixes to make gearbox start --- gearbox-default-values.yaml | 1125 ++++------------- .../templates/amanuensis-secret.yaml | 3 + .../templates/amanuensis-secrets.yaml | 25 +- helm/common/templates/_db_setup_job.tpl | 30 +- helm/common/templates/_jwt_key_pairs.tpl | 11 +- helm/fence/values.yaml | 1 + helm/portal/templates/deployment.yaml | 2 +- helm/portal/values.yaml | 259 +--- pcdc-default-values.yaml | 2 +- tools/connect_to_db.sh | 5 +- tools/gearbox | 2 +- tools/pcdc | 2 +- 12 files changed, 276 insertions(+), 1191 deletions(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 3b603de9a..ba23210bf 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -67,460 +67,155 @@ fence: AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' MOCK_GOOGLE_AUTH: true mock_default_user: 'test@example.com' - + volumes: + - name: old-config-volume + secret: + secretName: "fence-secret" + - name: json-secret-volume + secret: + secretName: "fence-json-secret" + optional: true + - name: creds-volume + secret: + secretName: "fence-creds" + - name: config-helper + configMap: + name: config-helper + optional: true + - name: logo-volume + configMap: + name: "logo-config" + - name: config-volume + secret: + secretName: "fence-config" + - name: fence-google-app-creds-secret-volume + secret: + secretName: "fence-google-app-creds-secret" + - name: fence-google-storage-creds-secret-volume + secret: + secretName: "fence-google-storage-creds-secret" + - name: fence-jwt-keys + secret: + secretName: "fence-jwt-keys" + - name: privacy-policy + configMap: + name: "privacy-policy" + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + optional: true + + volumeMounts: + - name: "old-config-volume" + readOnly: true + mountPath: "/var/www/fence/local_settings.py" + subPath: local_settings.py + - name: "json-secret-volume" + readOnly: true 
+ mountPath: "/var/www/fence/fence_credentials.json" + subPath: fence_credentials.json + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/fence/config_helper.py" + subPath: config_helper.py + - name: "logo-volume" + readOnly: true + mountPath: "/fence/fence/static/img/logo.svg" + subPath: "logo.svg" + - name: "privacy-policy" + readOnly: true + mountPath: "/fence/fence/static/privacy_policy.md" + subPath: "privacy_policy.md" + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_private_key.pem" + subPath: "jwt_private_key.pem" image: repository: quay.io/pcdc/fence tag: "helm-test" pullPolicy: Always USER_YAML: | - cloud_providers: {} - groups: {} authz: - # policies automatically given to anyone, even if they haven't authenticated - anonymous_policies: ['open_data_reader', 'full_open_access'] - - # policies automatically given to authenticated users (in addition to their other - # policies) - all_users_policies: ['open_data_reader', 'authn_open_access'] - user_project_to_resource: - QA: /programs/QA - DEV: /programs/DEV - test: /programs/QA/projects/test - jenkins: /programs/jnkns/projects/jenkins - jenkins2: /programs/jnkns/projects/jenkins2 - jnkns: /programs/jnkns + resources: + - name: 'gearbox_gateway' + - name: 'data_file' + description: 'data files, stored in S3' + - name: 'sower' + description: 'sower resource' + - name: workspace + description: jupyter notebooks + - name: analysis + description: analysis tool service + - name: portal + description: data portal service + - name: privacy + description: User privacy policy + - name: 'services' + subresources: + - name: 'sheepdog' + subresources: + - name: 'submission' + subresources: + - name: 'program' + - name: 'project' + - name: 'amanuensis' + - name: 'fence' + subresources: + - name: 'admin' + - name: programs + subresources: + - name: pcdc policies: - # GEARBOX - id: gearbox_admin resource_paths: ['/gearbox_gateway'] role_ids: ['gearbox_user'] - # General Access - - id: 'workspace' - description: 'be able to use workspace' - resource_paths: ['/workspace'] - role_ids: ['workspace_user'] - - id: 'dashboard' - description: 'be able to use the commons dashboard' - resource_paths: ['/dashboard'] - role_ids: ['dashboard_user'] - - id: 'prometheus' - description: 'be able to use prometheus' - resource_paths: ['/prometheus'] - role_ids: ['prometheus_user'] - - id: 'ttyadmin' - description: 'be able to use the admin tty' - resource_paths: ['/ttyadmin'] - role_ids: ['ttyadmin_user'] - - id: 'mds_admin' - description: 'be able to use metadata service' - resource_paths: ['/mds_gateway'] - role_ids: ['mds_user'] - id: 'data_upload' description: 'upload raw data files to S3' - role_ids: ['file_uploader'] - resource_paths: ['/data_file'] - - description: be able to use sower job - id: sower - resource_paths: [/sower] 
- role_ids: [sower_user] - - id: 'mariner_admin' - description: 'full access to mariner API' - resource_paths: ['/mariner'] - role_ids: ['mariner_admin'] - - id: audit_reader - role_ids: - - audit_reader - resource_paths: - - /services/audit - - id: audit_login_reader - role_ids: - - audit_reader - resource_paths: - - /services/audit/login - - id: audit_presigned_url_reader - role_ids: - - audit_reader - resource_paths: - - /services/audit/presigned_url - - id: requestor_admin - role_ids: - - requestor_admin - resource_paths: - - /programs - - id: requestor_reader - role_ids: - - requestor_reader - resource_paths: - - /programs - - id: requestor_creator - role_ids: - - requestor_creator - resource_paths: - - /programs - - id: requestor_updater - role_ids: - - requestor_updater - resource_paths: - - /programs - - id: requestor_deleter - role_ids: - - requestor_deleter - resource_paths: - - /programs - # Data Access - - # All programs policy - - id: 'all_programs_reader' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: ['/programs'] - - # # example if need access to write to storage - # - id: 'programs.jnkns-storage_writer' - # description: '' - # role_ids: - # - 'storage_writer' - # resource_paths: ['/programs/jnkns'] - - - id: 'programs.jnkns-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/programs/jnkns' - - '/gen3/programs/jnkns' - - - id: 'programs.jnkns-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/jnkns' - - '/gen3/programs/jnkns' - - - - id: 'programs.QA-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/programs/QA' - - '/gen3/programs/QA' - - - id: 'programs.QA-admin-no-storage' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - resource_paths: - - '/programs/QA' - - '/gen3/programs/QA' - - - id: 'programs.QA-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/QA' - - '/gen3/programs/QA' - - - id: 'programs.DEV-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - - 'storage_writer' - resource_paths: - - '/programs/DEV' - - '/gen3/programs/DEV' - - - id: 'programs.DEV-storage_writer' - description: '' - role_ids: - - 'storage_writer' - resource_paths: ['/programs/DEV'] - - - id: 'programs.DEV-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/DEV' - - '/gen3/programs/DEV' - - - id: 'programs.test-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/programs/test' - - '/gen3/programs/test' - - - id: 'programs.test-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/programs/test' - - '/gen3/programs/test' - - - id: 'abc-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/abc' - - - id: 'gen3-admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/gen3' - - - id: 'gen3-hmb-researcher' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_reader' - resource_paths: - - '/consents/NRES' - - 
'/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - '/gen3' - - - id: 'abc.programs.test_program.projects.test_project1-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/abc/programs/test_program/projects/test_project1' - - - id: 'abc.programs.test_program.projects.test_project2-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/abc/programs/test_program/projects/test_project2' - - - id: 'abc.programs.test_program2.projects.test_project3-viewer' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: - - '/abc/programs/test_program2/projects/test_project3' - - # Open data policies - - id: 'authn_open_access' - resource_paths: ['/programs/open/projects/authnRequired'] - description: '' - role_ids: - - 'reader' - - 'storage_reader' - - id: 'full_open_access' - resource_paths: ['/programs/open/projects/1000G'] - description: '' - role_ids: - - 'reader' - - 'storage_reader' - - id: 'open_data_reader' - description: '' - role_ids: - - 'reader' - - 'storage_reader' - resource_paths: ['/open'] - - id: 'open_data_admin' - description: '' - role_ids: - - 'creator' - - 'reader' - - 'updater' - - 'deleter' - - 'storage_writer' - - 'storage_reader' - resource_paths: ['/open'] - - # Consent Code Policies - - id: 'not-for-profit-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NPU' - - - id: 'publication-required-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/PUB' - - - id: 'gru-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - - id: 'gru-cc-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - - id: 'hmb-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - - id: 'poa-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/POA' - - - id: 'ds-lung-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - '/consents/DS_LungDisease' - - - id: 'ds-chronic-obstructive-pulmonary-disease-researcher' - description: '' - role_ids: - - 'admin' - resource_paths: - - '/consents/NRES' - - '/consents/GRU' - - '/consents/GRU_CC' - - '/consents/HMB' - - '/consents/DS_ChronicObstructivePulmonaryDisease' - - - id: 'services.sheepdog-admin' - description: 'CRUD access to programs and projects' - role_ids: - - 'sheepdog_admin' - resource_paths: - - '/services/sheepdog/submission/program' - - '/services/sheepdog/submission/project' - - # indexd - - id: 'indexd_admin' - description: 'full access to indexd API' - role_ids: - - 'indexd_admin' - resource_paths: - - '/programs' - - '/services/indexd/admin' - # # TODO resource path '/' is not valid right now in arborist, trying to decide - # # how to handle all resources - # - id: 'indexd_admin' - # description: '' - # role_ids: - # - 'indexd_record_creator' - # - 'indexd_record_reader' - # - 'indexd_record_updater' - # - 'indexd_delete_record' - # - 'indexd_storage_reader' - # - 'indexd_storage_writer' - # resource_paths: ['/'] - # - id: 'indexd_record_reader' - # description: '' - # role_ids: - # - 'indexd_record_reader' - # 
resource_paths: ['/'] - # - id: 'indexd_record_editor' - # description: '' - # role_ids: - # - 'indexd_record_creator' - # - 'indexd_record_reader' - # - 'indexd_record_updater' - # - 'indexd_delete_record' - # resource_paths: ['/'] - # - id: 'indexd_storage_reader' - # description: '' - # role_ids: - # - 'indexd_storage_reader' - # resource_paths: ['/'] - # - id: 'indexd_storage_editor' - # description: '' - # role_ids: - # - 'indexd_storage_reader' - # - 'indexd_storage_writer' - # resource_paths: ['/'] - - # argo - - id: argo - description: be able to use argo - resource_paths: [/argo] - role_ids: [argo_user] - - #PCDC specific + resource_paths: + - /data_file + role_ids: + - file_uploader - id: 'services.amanuensis-admin' description: 'admin access to amanuensis' role_ids: - 'amanuensis_admin' resource_paths: - '/services/amanuensis' + - id: 'services.fence-admin' + description: 'admin access to fence' + role_ids: + - 'fence_admin' + resource_paths: + - '/services/fence/admin' + - id: workspace + description: be able to use workspace + resource_paths: + - /workspace + role_ids: + - workspace_user - id: analysis description: be able to use analysis tool service resource_paths: @@ -533,6 +228,29 @@ fence: - /privacy role_ids: - reader + - id: indexd_admin + description: full access to indexd API + role_ids: + - indexd_admin + resource_paths: + - /programs + - description: be able to use sower job + id: sower + resource_paths: [/sower] + role_ids: [sower_user] + - id: 'services.sheepdog-admin' + description: 'CRUD access to programs and projects' + role_ids: + - 'sheepdog_admin' + resource_paths: + - '/services/sheepdog/submission/program' + - '/services/sheepdog/submission/project' + - id: all_programs_reader + role_ids: + - reader + - storage_reader + resource_paths: + - /programs - id: login_no_access role_ids: - reader @@ -545,541 +263,122 @@ fence: resource_paths: - /programs - /programs/pcdc - resources: - #GEARBOX - - name: 'portal' - description: 'data portal service' - - name: 'gearbox_gateway' - # General Access - - name: 'data_file' - description: 'data files, stored in S3' - - name: 'dashboard' - description: 'commons /dashboard' - - name: 'mds_gateway' - description: 'commons /mds-admin' - - name: 'prometheus' - description: 'commons /prometheus and /grafana' - - name: 'ttyadmin' - description: 'commons /ttyadmin' - - name: 'workspace' - description: jupyter notebooks - - name: "sower" - description: 'sower resource' - - name: 'mariner' - description: 'workflow execution service' - - name: argo - #PCDC - - name: analysis - description: analysis tool service - - name: portal - description: data portal service - - name: privacy - description: User privacy policy - # OLD Data - - name: 'programs' - subresources: - #PCDC - - name: pcdc - - name: 'open' - subresources: - - name: 'projects' - subresources: - - name: '1000G' - - name: 'authnRequired' - - name: 'QA' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'DEV' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'jnkns' - subresources: - - name: 'projects' - subresources: - - name: 'jenkins' - - name: 'jenkins2' - - name: 'test' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - # NEW Data WITH PREFIX - - name: 'gen3' - subresources: - - name: 'programs' - subresources: - - name: 'QA' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - name: 'DEV' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - 
name: 'jnkns' - subresources: - - name: 'projects' - subresources: - - name: 'jenkins' - - name: 'jenkins2' - - name: 'test' - subresources: - - name: 'projects' - subresources: - - name: 'test' - - # consents obtained from DUO and NIH - # https://github.com/EBISPOT/DUO - # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4721915/ - - name: 'consents' - subresources: - - name: 'NRES' - description: 'no restriction' - - name: 'GRU' - description: 'general research use' - - name: 'GRU_CC' - description: 'general research use and clinical care' - - name: 'HMB' - description: 'health/medical/biomedical research' - - name: 'POA' - description: 'population origins or ancestry research' - - name: 'NMDS' - description: 'no general methods research' - - name: 'NPU' - description: 'not-for-profit use only' - - name: 'PUB' - description: 'publication required' - - name: 'DS_LungDisease' - description: 'disease-specific research for lung disease' - - name: 'DS_ChronicObstructivePulmonaryDisease' - description: 'disease-specific research for chronic obstructive pulmonary disease' - - - name: 'abc' - subresources: - - name: 'programs' - subresources: - - name: 'foo' - subresources: - - name: 'projects' - subresources: - - name: 'bar' - - name: 'test_program' - subresources: - - name: 'projects' - subresources: - - name: 'test_project1' - - name: 'test_project2' - - name: 'test_program2' - subresources: - - name: 'projects' - subresources: - - name: 'test_project3' - - - # "Sheepdog admin" resources - - name: 'services' - subresources: - - name: 'sheepdog' - subresources: - - name: 'submission' - subresources: - - name: 'program' - - name: 'project' - #PCDC - - name: 'amanuensis' - - name: 'indexd' - subresources: - - name: 'admin' - - name: 'bundles' - - name: audit - subresources: - - name: presigned_url - - name: login - - - name: 'open' - - # action/methods: - # create, read, update, delete, read-storage, write-storage, - # file_upload, access + + roles: - #GEARBOX - id: 'gearbox_user' permissions: - id: 'gearbox_access' action: - service: '*' - method: '*' - # General Access + service: '*' + method: '*' - id: 'file_uploader' description: 'can upload data files' permissions: - id: 'file_upload' action: - service: '*' - method: 'file_upload' - - id: 'workspace_user' - permissions: - - id: 'workspace_access' - action: - service: 'jupyterhub' - method: 'access' - - id: 'dashboard_user' - permissions: - - id: 'dashboard_access' - action: - service: 'dashboard' - method: 'access' - - id: 'mds_user' + service: 'fence' + method: 'file_upload' + - id: 'amanuensis_admin' + description: 'can do admin work on project/data request' permissions: - - id: 'mds_access' - action: - service: 'mds_gateway' - method: 'access' - - id: 'prometheus_user' + - id: 'amanuensis_admin_action' + action: + service: 'amanuensis' + method: '*' + - id: 'fence_admin' + description: 'can use the admin endpoint in Fence' permissions: - - id: 'prometheus_access' - action: - service: 'prometheus' - method: 'access' - - id: 'ttyadmin_user' + - id: 'fence_admin_permission' + action: + service: 'fence' + method: '*' + - id: workspace_user permissions: - - id: 'ttyadmin_access' - action: - service: 'ttyadmin' - method: 'access' - - id: 'sower_user' + - action: {method: access, service: jupyterhub} + id: workspace_access + - id: sower_user permissions: - - id: 'sower_access' - action: - service: 'job' - method: 'access' - - id: 'mariner_admin' + - action: {method: access, service: job} + id: sower_access + - id: analysis_user permissions: - - id: 
'mariner_access' - action: - service: 'mariner' - method: 'access' - - id: 'audit_reader' + - action: {method: access, service: analysis} + id: analysis_access + # Sheepdog admin role + - id: 'sheepdog_admin' + description: 'sheepdog admin role for program project crud' permissions: - - id: 'audit_reader_action' + - id: 'sheepdog_admin_action' action: - service: 'audit' - method: 'read' - - id: 'analysis_user' + service: 'sheepdog' + method: '*' + - id: indexd_admin + description: full access to indexd API permissions: - - action: {method: 'access', service: 'analysis'} - id: 'analysis_access' - # All services - - id: 'admin' - description: '' + - id: indexd_admin + action: + service: indexd + method: '*' + - id: admin permissions: - - id: 'admin' + - id: admin action: service: '*' method: '*' - - id: 'creator' - description: '' + - id: creator permissions: - - id: 'creator' + - id: creator action: service: '*' - method: 'create' - - id: 'reader' - description: '' + method: create + - id: reader permissions: - - id: 'reader' + - id: reader action: service: '*' - method: 'read' - - id: 'updater' - description: '' + method: read + - id: updater permissions: - - id: 'updater' + - id: updater action: service: '*' - method: 'update' - - id: 'deleter' - description: '' + method: update + - id: deleter permissions: - - id: 'deleter' + - id: deleter action: service: '*' - method: 'delete' - - id: 'storage_writer' - description: '' + method: delete + - id: storage_writer permissions: - - id: 'storage_writer' + - id: storage_creator action: service: '*' - method: 'write-storage' - - id: 'storage_reader' - description: '' + method: write-storage + - id: storage_reader permissions: - - id: 'storage_reader' + - id: storage_reader action: service: '*' - method: 'read-storage' - - - # Sheepdog admin role - - id: 'sheepdog_admin' - description: 'sheepdog admin role for program project crud' - permissions: - - id: 'sheepdog_admin_action' - action: - service: 'sheepdog' - method: '*' - - - # indexd - - id: 'indexd_admin' - # this only works if indexd.arborist is enabled in manifest! 
- description: 'full access to indexd API' - permissions: - - id: 'indexd_admin' - action: - service: 'indexd' - method: '*' - - id: 'indexd_record_creator' - description: '' - permissions: - - id: 'indexd_record_creator' - action: - service: 'indexd' - method: 'create' - - id: 'indexd_record_reader' - description: '' - permissions: - - id: 'indexd_record_reader' - action: - service: 'indexd' - method: 'read' - - id: 'indexd_record_updater' - description: '' - permissions: - - id: 'indexd_record_updater' - action: - service: 'indexd' - method: 'update' - - id: 'indexd_delete_record' - description: '' - permissions: - - id: 'indexd_delete_record' - action: - service: 'indexd' - method: 'delete' - - id: 'indexd_storage_reader' - description: '' - permissions: - - id: 'indexd_storage_reader' - action: - service: 'indexd' - method: 'read-storage' - - id: 'indexd_storage_writer' - description: '' - permissions: - - id: 'indexd_storage_writer' - action: - service: 'indexd' - method: 'write-storage' - - # arborist - - id: 'arborist_creator' - description: '' - permissions: - - id: 'arborist_creator' - action: - service: 'arborist' - method: 'create' - - id: 'arborist_reader' - description: '' - permissions: - - id: 'arborist_reader' - action: - service: 'arborist' - method: 'read' - - id: 'arborist_updater' - description: '' - permissions: - - id: 'arborist_updater' - action: - service: 'arborist' - method: 'update' - - id: 'arborist_deleter' - description: '' - permissions: - - id: 'arborist_deleter' - action: - service: 'arborist' - method: 'delete' - - # requestor - - id: requestor_admin - permissions: - - id: requestor_admin_action - action: - service: requestor - method: '*' - - id: requestor_reader - permissions: - - id: requestor_reader_action - action: - service: requestor - method: read - - id: requestor_creator - permissions: - - id: requestor_creator_action - action: - service: requestor - method: create - - id: requestor_updater - permissions: - - id: requestor_updater_action - action: - service: requestor - method: update - - id: requestor_deleter - permissions: - - id: requestor_deleter_action - action: - service: requestor - method: delete - # argo - - id: argo_user - permissions: - - id: argo_access - action: - service: argo - method: access - #PCDC specific - #amanuensis - - id: 'amanuensis_admin' - description: 'can do admin work on project/data request' - permissions: - - id: 'amanuensis_admin_action' - action: - service: 'amanuensis' - method: '*' - clients: - basic-test-client: - policies: - - abc-admin - - gen3-admin - basic-test-abc-client: - policies: - - abc-admin - wts: - policies: - - all_programs_reader - - workspace - + method: read-storage + users: ### BEGIN INTERNS SECTION ### ### END INTERNS SECTION ### - qureshi@uchicago.edu: - admin: true - policies: - - data_upload - - workspace - - dashboard - - mds_admin - - prometheus - - sower - - services.sheepdog-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, 
delete, upload, read-storage] pmurdoch@uchicago.edu: admin: true policies: - gearbox_admin - data_upload - workspace - - dashboard - - mds_admin - - prometheus - - sower - - services.sheepdog-admin - - services.amanuensis-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin - - data_admin - - analysis - - privacy_policy - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] - graglia01@gmail.com: - admin: true - policies: - - data_upload - - workspace - - dashboard - - mds_admin - - prometheus - - sower - services.sheepdog-admin - services.amanuensis-admin - - programs.QA-admin - - programs.test-admin - - programs.DEV-admin - - programs.jnkns-admin - - indexd_admin - - ttyadmin - data_admin - analysis - privacy_policy - login_no_access - projects: - - auth_id: QA - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: test - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: DEV - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jenkins2 - privilege: [create, read, update, delete, upload, read-storage] - - auth_id: jnkns - privilege: [create, read, update, delete, upload, read-storage] + - sower portal: image: repository: quay.io/pcdc/gearbox_fe diff --git a/helm/amanuensis/templates/amanuensis-secret.yaml b/helm/amanuensis/templates/amanuensis-secret.yaml index c7ae2e52b..a4b0cd830 100644 --- a/helm/amanuensis/templates/amanuensis-secret.yaml +++ b/helm/amanuensis/templates/amanuensis-secret.yaml @@ -2,6 +2,9 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-secret + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" type: Opaque data: {{ (.Files.Glob "amanuensis-secret/*").AsSecrets | indent 2 }} diff --git a/helm/amanuensis/templates/amanuensis-secrets.yaml b/helm/amanuensis/templates/amanuensis-secrets.yaml index 9d3d39f3b..e32b15f52 100644 --- a/helm/amanuensis/templates/amanuensis-secrets.yaml +++ b/helm/amanuensis/templates/amanuensis-secrets.yaml @@ -2,11 +2,17 @@ apiVersion: v1 kind: ServiceAccount metadata: name: amanuensis-jobs + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-2" --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: amanuensis-jobs-role + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-2" rules: - apiGroups: [""] resources: ["secrets"] @@ -16,6 +22,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: amanuensis-jobs-role-binding + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-2" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -29,6 +38,10 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-config + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" + "helm.sh/hook-delete-policy": before-hook-creation data: amanuensis-config.yaml: 
"" --- @@ -36,6 +49,9 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-creds + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" data: creds.json: "" --- @@ -44,6 +60,9 @@ kind: Job metadata: name: amanuensis-secrets-{{ .Release.Revision }} annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": before-hook-creation labels: redeploy-hash: "{{ .Release.Revision }}" spec: @@ -95,12 +114,6 @@ spec: name: amanuensis-dbcreds key: port optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: amanuensis-dbcreds - key: dbcreated - optional: false - name: HOSTNAME value: "{{ .Values.global.hostname }}" - name: DATA_DOWNLOAD_BUCKET diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 9ea67dbea..31f552ab1 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -175,15 +175,31 @@ apiVersion: v1 kind: Secret metadata: name: {{ $.Chart.Name }}-dbcreds + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-5" data: - database: {{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) | b64enc | quote}} - username: {{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) | b64enc | quote}} - port: {{ $.Values.postgres.port | b64enc | quote }} - password: {{ include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) | b64enc | quote }} - {{- if $.Values.global.dev }} - host: {{ (printf "%s-%s" $.Release.Name "postgresql" ) | b64enc | quote }} + {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-dbcreds" .Chart.Name)) }} + {{- if $existingSecret }} + database: {{ index $existingSecret.data "database" | quote }} + username: {{ index $existingSecret.data "username" | quote }} + port: {{ index $existingSecret.data "port" | quote }} + password: {{ index $existingSecret.data "password" | quote }} + host: {{ index $existingSecret.data "host" | quote }} + {{- if index $existingSecret.data "dbcreated" }} + dbcreated: {{ index $existingSecret.data "dbcreated" | quote }} + {{- end }} + {{- end }} {{- else }} - host: {{ ( $.Values.postgres.host | default ( $.Values.global.postgres.master.host)) | b64enc | quote }} + database: {{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) | b64enc | quote }} + username: {{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) | b64enc | quote }} + port: {{ $.Values.postgres.port | b64enc | quote }} + password: {{ include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) | b64enc | quote }} + {{- if $.Values.global.dev }} + host: {{ (printf "%s-%s" $.Release.Name "postgresql" ) | b64enc | quote }} + {{- else }} + host: {{ ( $.Values.postgres.host | default ( $.Values.global.postgres.master.host)) | b64enc | quote }} + {{- end }} {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/helm/common/templates/_jwt_key_pairs.tpl b/helm/common/templates/_jwt_key_pairs.tpl index a6fe5ebd7..4f6ac3b84 100644 --- a/helm/common/templates/_jwt_key_pairs.tpl +++ b/helm/common/templates/_jwt_key_pairs.tpl @@ -40,7 +40,7 @@ roleRef: apiVersion: batch/v1 kind: Job metadata: - name: {{ .Chart.Name }}-create-public-key + name: {{ .Chart.Name }}-create-public-key-{{ .Release.Revision }} labels: app: gen3job spec: @@ -94,7 
+94,16 @@ apiVersion: v1 kind: Secret metadata: name: {{ $.Chart.Name }}-jwt-keys + annotations: + helm.sh/resource-policy: keep type: Opaque data: + {{- if (lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-keys" .Chart.Name)) }} + # Secret exists - don't regenerate the private key + {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-keys" .Chart.Name)) }} + jwt_private_key.pem: {{ index $existingSecret.data "jwt_private_key.pem" | quote }} + {{- else }} + # Secret doesn't exist yet - generate a new key jwt_private_key.pem: {{ genPrivateKey "rsa" | b64enc | quote }} + {{- end }} {{- end }} \ No newline at end of file diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index d1841cc05..9f7b1ac96 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -380,6 +380,7 @@ volumes: items: - key: jwt_public_key.pem path: jwt_public_key.pem + optional: true # -- (list) Volumes to mount to the container. volumeMounts: diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index dc7a9e2b7..7d072a3ae 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -219,7 +219,7 @@ spec: mountPath: /data-portal/custom/images {{- end }} - name: "config-volume" - mountPath: "/data-portal/data/config/gitops.json" + mountPath: "/data-portal/data/config/pcdc.json" subPath: "gitops.json" - name: "config-volume" mountPath: "/data-portal/custom/logo/gitops-logo.png" diff --git a/helm/portal/values.yaml b/helm/portal/values.yaml index d697bb551..ab2772c33 100644 --- a/helm/portal/values.yaml +++ b/helm/portal/values.yaml @@ -211,264 +211,7 @@ extraImages: # -- (map) GitOps configuration for portal gitops: # -- (string) multiline string - gitops.json - json: | - { - "graphql": { - "boardCounts": [ - { - "graphql": "_case_count", - "name": "Case", - "plural": "Cases" - }, - { - "graphql": "_experiment_count", - "name": "Experiment", - "plural": "Experiments" - }, - { - "graphql": "_aliquot_count", - "name": "Aliquot", - "plural": "Aliquots" - } - ], - "chartCounts": [ - { - "graphql": "_case_count", - "name": "Case" - }, - { - "graphql": "_experiment_count", - "name": "Experiment" - }, - { - "graphql": "_aliquot_count", - "name": "Aliquot" - } - ], - "projectDetails": "boardCounts" - }, - "components": { - "appName": "Generic Data Commons Portal", - "index": { - "introduction": { - "heading": "Data Commons", - "text": "The Generic Data Commons supports the management, analysis and sharing of data for the research community.", - "link": "/submission" - }, - "buttons": [ - { - "name": "Define Data Field", - "icon": "data-field-define", - "body": "The Generic Data Commons define the data in a general way. 
Please study the dictionary before you start browsing.", - "link": "/DD", - "label": "Learn more" - }, - { - "name": "Explore Data", - "icon": "data-explore", - "body": "The Exploration Page gives you insights and a clear overview under selected factors.", - "link": "/explorer", - "label": "Explore data" - }, - { - "name": "Access Data", - "icon": "data-access", - "body": "Use our selected tool to filter out the data you need.", - "link": "/query", - "label": "Query data" - }, - { - "name": "Submit Data", - "icon": "data-submit", - "body": "Submit Data based on the dictionary.", - "link": "/submission", - "label": "Submit data" - } - ] - }, - "navigation": { - "title": "Generic Data Commons", - "items": [ - { - "icon": "dictionary", - "link": "/DD", - "color": "#a2a2a2", - "name": "Dictionary" - }, - { - "icon": "exploration", - "link": "/explorer", - "color": "#a2a2a2", - "name": "Exploration" - }, - { - "icon": "query", - "link": "/query", - "color": "#a2a2a2", - "name": "Query" - }, - { - "icon": "workspace", - "link": "/workspace", - "color": "#a2a2a2", - "name": "Workspace" - }, - { - "icon": "profile", - "link": "/identity", - "color": "#a2a2a2", - "name": "Profile" - } - ] - }, - "topBar": { - "items": [ - { - "icon": "upload", - "link": "/submission", - "name": "Submit Data" - }, - { - "link": "https://gen3.org/resources/user", - "name": "Documentation" - } - ] - }, - "login": { - "title": "Generic Data Commons", - "subTitle": "Explore, Analyze, and Share Data", - "text": "This website supports the management, analysis and sharing of human disease data for the research community and aims to advance basic understanding of the genetic basis of complex traits and accelerate discovery and development of therapies, diagnostic tests, and other technologies for diseases like cancer.", - "contact": "If you have any questions about access or the registration process, please contact ", - "email": "support@datacommons.io" - }, - "certs": {}, - "footerLogos": [ - { - "src": "/src/img/gen3.png", - "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons" - }, - { - "src": "/src/img/createdby.png", - "href": "https://ctds.uchicago.edu/", - "alt": "Center for Translational Data Science at the University of Chicago" - } - ] - }, - "requiredCerts": [], - "featureFlags": { - "explorer": true, - "noIndex": true, - "analysis": false, - "discovery": false, - "discoveryUseAggMDS": false, - "studyRegistration": false - }, - "dataExplorerConfig": { - "charts": { - "project_id": { - "chartType": "count", - "title": "Projects" - }, - "_case_id": { - "chartType": "count", - "title": "Cases" - }, - "gender": { - "chartType": "pie", - "title": "Gender" - }, - "race": { - "chartType": "bar", - "title": "Race" - } - }, - "filters": { - "tabs": [ - { - "title": "Case", - "fields":[ - "project_id", - "gender", - "race", - "ethnicity" - ] - } - ] - }, - "table": { - "enabled": false - }, - "dropdowns": {}, - "buttons": [], - "guppyConfig": { - "dataType": "case", - "nodeCountTitle": "Cases", - "fieldMapping": [ - { "field": "disease_type", "name": "Disease type" }, - { "field": "primary_site", "name": "Site where samples were collected"} - ], - "manifestMapping": { - "resourceIndexType": "file", - "resourceIdField": "object_id", - "referenceIdFieldInResourceIndex": "_case_id", - "referenceIdFieldInDataIndex": "node_id" - }, - "accessibleFieldCheckList": ["_case_id"], - "accessibleValidationField": "_case_id" - } - }, - "fileExplorerConfig": { - "charts": { - "data_type": { - "chartType": "stackedBar", 
- "title": "File Type" - }, - "data_format": { - "chartType": "stackedBar", - "title": "File Format" - } - }, - "filters": { - "tabs": [ - { - "title": "File", - "fields": [ - "project_id", - "data_type", - "data_format" - ] - } - ] - }, - "table": { - "enabled": true, - "fields": [ - "project_id", - "file_name", - "file_size", - "object_id" - ] - }, - "dropdowns": {}, - "guppyConfig": { - "dataType": "file", - "fieldMapping": [ - { "field": "object_id", "name": "GUID" } - ], - "nodeCountTitle": "Files", - "manifestMapping": { - "resourceIndexType": "case", - "resourceIdField": "_case_id", - "referenceIdFieldInResourceIndex": "object_id", - "referenceIdFieldInDataIndex": "object_id" - }, - "accessibleFieldCheckList": ["_case_id"], - "accessibleValidationField": "_case_id", - "downloadAccessor": "object_id" - } - } - } + json: # -- (string) - favicon in base64 favicon: "AAABAAEAICAAAAEAIACoEAAAFgAAACgAAAAgAAAAQAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQv3IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1MiCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwKg0Nd6yqf+8pi7D3rKp/96yqf/esqn/3rKp/76qNMPEpU2QxbFJNwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7WfF3cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMWySQAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/TrIS0AAAAAL+nLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACxmAIAxrhKBregGtLesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/2MyPCLGaCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAs5kJANqvn0vesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/18l+GwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKuSAADq5L8H3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/z79qBca0SwAAAAAAAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+4oR3YAAAAAAAAAAAAAAAAAAAAAAAAAAC4oBlZ3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/AqC/N3rKp/96yqf+/rD3M3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+4oyBkAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+9qDAqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzb1oH96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/8qoYv8AAAAAAAAAALefHQC4oB5X3rKp/96yqf/esqn/AAAAAAAAAADm3bsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOHbrAAAAAAA6ePTEd6yqf/esqn/3rKp/8CsNngAAAAAAAAAAN6yqf/esqn/3rKp/////xIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADq4bwA08V3EN6yqf/esqn/3rKp/wAAAAAAAAAA3rKp/96yqf+6nyfZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/AAAAALyjJDbesqn/3rKp/7ihIc0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADFpE7l3rKp/96yqf/esqn/wq0+Wd6yqf/esqn/3rKp/wAAAADPwW4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC7pCAAAAAAAN6yqf/esqn/3rKp/8CsOVK6oyF63rKp/96yqf/esqn/uqQqxAAAA
AC7oyQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtZ8WAAAAAADesqn/3rKp/96yqf/esqn/3rKp/7ukIHresqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/wK1BXN6yqf/esqn/3rKp/96yqf/esqn/uKAYUgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL+oO1Hesqn/3rKp/96yqf/esqn/3rKp/76pLXq3nx023rKp/96yqf/esqn/3rKp/96yqf/esqn/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAt58l896yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/xrRRVQAAAADYzYkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAM67agAAAAAAybZYUt6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/9+/UXAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAACznRMAtJ4ZV96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/ArDZ4AAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/yqdi/wAAAAAAAAAAAAAAAAAAAADHplZ93rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/6Ny8U+bauVDesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+5oyBkAAAAAAAAAAAAAAAAAAAAAAAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/t6Ec1wAAAAAAAAAAAAAAAAAAAAAAAAAAs5sWAOHUlQfesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/OxHUFxbRJAAAAAAAAAAAAAAAAAAAAAAAAAAAAsJkFAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/29COIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAr5YBAN6yqf+7pSf43rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/uaMf+d2xp6MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyrhUAAAAAAC7pil73rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7miH38AAAAAxrJDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADi150b2K6T4N6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7mjI5zUxHAaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOnftwAAAAAAAAAAAN6yqf/esqn/3rKp/7egG+e2nxf/uKAk/7mjIvPesqn/3rKp/7agGEAAAAAAAAAAANnOjAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA///////wD///gAP//gAAf/wAAD/4AAAf8AAAD+AAAAfgAAAHwA/wA8f//+OP///xj///8Y////CP///xh///4IP//8CD///Bgf//gID//wGAP/wBwB/4A8AP8APgAYAH4AAAB/AAAA/wAAAf+AAAH/8AAP//" diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index d76e4dca4..d9c55f79b 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -322,7 +322,7 @@ portal: #enabled: false image: repository: "quay.io/pcdc/windmill" - tag: "1.34.0" + tag: "1.36.1" pullPolicy: IfNotPresent resources: requests: diff --git a/tools/connect_to_db.sh b/tools/connect_to_db.sh index e277b928e..cf435b857 100755 --- a/tools/connect_to_db.sh +++ b/tools/connect_to_db.sh @@ -6,10 +6,11 @@ if [ $# -eq 0 ]; then exit 1 
fi -service_name=$1 +project_name=$1 +service_name=$2 # Retrieve password from secret password=$(kubectl get secret ${service_name}-dbcreds -o jsonpath="{.data.password}" | base64 --decode) # Execute command in the pod -kubectl exec -it pcdc-postgresql-0 -- /bin/bash -c "PGPASSWORD='${password}' psql -h pcdc-postgresql -U ${service_name}_pcdc -d ${service_name}_pcdc" \ No newline at end of file +kubectl exec -it ${project_name}-postgresql-0 -- /bin/bash -c "PGPASSWORD='${password}' psql -h ${project_name}-postgresql -U ${service_name}_${project_name} -d ${service_name}_${project_name}" \ No newline at end of file diff --git a/tools/gearbox b/tools/gearbox index 7a548c5a7..1486504f6 100755 --- a/tools/gearbox +++ b/tools/gearbox @@ -27,7 +27,7 @@ shift # Remove the first argument (command) case "$command" in "psql") # Run the connect_to_db.sh script with the remaining arguments - "$CONNECT_SCRIPT" "$@" + "$CONNECT_SCRIPT" gearbox "$@" ;; "roll") # Run the roll.sh script with the remaining arguments diff --git a/tools/pcdc b/tools/pcdc index 7929fdcd9..c2db14797 100755 --- a/tools/pcdc +++ b/tools/pcdc @@ -30,7 +30,7 @@ shift # Remove the first argument (command) case "$command" in "psql") # Run the connect_to_db.sh script with the remaining arguments - "$CONNECT_SCRIPT" "$@" + "$CONNECT_SCRIPT" pcdc "$@" ;; "roll") # Run the roll.sh script with the remaining arguments From 51f359d552600479b82776bfe54062df75555dfa Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 21 Mar 2025 13:46:59 -0700 Subject: [PATCH 030/126] fix format --- helm/common/templates/_db_setup_job.tpl | 1 - 1 file changed, 1 deletion(-) diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 31f552ab1..51d27963b 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -189,7 +189,6 @@ data: {{- if index $existingSecret.data "dbcreated" }} dbcreated: {{ index $existingSecret.data "dbcreated" | quote }} {{- end }} - {{- end }} {{- else }} database: {{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) | b64enc | quote }} username: {{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) | b64enc | quote }} From b4e10549fd6465539dec863e9907fc083b4dd1a1 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 8 Apr 2025 16:36:20 -0700 Subject: [PATCH 031/126] add changes --- ...nuensis-populate-search-table-cronjob.yaml | 34 ------- .../templates/amanuensis-secrets.yaml | 2 +- ...anuensis-validate-filter-sets-cronjob.yaml | 92 +++++++++++++++++++ helm/sower/templates/pelican-creds.yaml | 2 - pcdc-default-values.yaml | 62 +++++++++++++ 5 files changed, 155 insertions(+), 37 deletions(-) delete mode 100644 helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml create mode 100644 helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml diff --git a/helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml b/helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml deleted file mode 100644 index ed293238b..000000000 --- a/helm/amanuensis/templates/amanuensis-populate-search-table-cronjob.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: amanuensis-db-filter-set-filler - labels: - redeploy-hash: "{{ .Release.Revision }}" -spec: - schedule: "0 0 1 * *" - concurrencyPolicy: Forbid - # This is defualt but do we want to keep long term record of deleted filter sets - # the 
original filter-sets will not be deleted unless manually done by the user - successfulJobsHistoryLimit: 3 - jobTemplate: - spec: - template: - metadata: - labels: - app: gen3job - spec: - automountServiceAccountToken: false - volumes: - - name: config-volume - secret: - secretName: "amanuensis-config" - containers: - - name: amanuensis-db-filter-set-filler - image: "amanuensis-db-filter-set-filler:test" - imagePullPolicy: Never - volumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/amanuensis/amanuensis-config.yaml" - subPath: amanuensis-config.yaml - restartPolicy: Never \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-secrets.yaml b/helm/amanuensis/templates/amanuensis-secrets.yaml index e32b15f52..fe879774b 100644 --- a/helm/amanuensis/templates/amanuensis-secrets.yaml +++ b/helm/amanuensis/templates/amanuensis-secrets.yaml @@ -150,7 +150,7 @@ spec: - name: create-amanuensis-config image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: Never + imagePullPolicy: "{{ .Values.image.pullPolicy }}" volumeMounts: - name: "config-helper" readOnly: true diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml new file mode 100644 index 000000000..e6a2bbc19 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml @@ -0,0 +1,92 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: amanuensis-validate-filter-sets + labels: + redeploy-hash: "{{ .Release.Revision }}" +spec: + schedule: "* * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 3 + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + automountServiceAccountToken: false + volumes: + - name: config-volume + secret: + secretName: "amanuensis-config" + - name: es-dd-config-volume + emptyDir: {} + - name: portal-config + secret: + secretName: "portal-config" + initContainers: + - name: amanuensis-db-filter-set-filler + image: "quay.io/pcdc/amanuensis-db-filter-set-filler:main" + imagePullPolicy: IfNotPresent + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: create-es-dd-config + image: "quay.io/cdis/awshelper:stable" + imagePullPolicy: IfNotPresent + env: + - name: BASE_URL + value: "https://portal.pedscommons.org/" + - name: OUTPUT_FILE + value: "/tmp/es-dd-config/es_to_dd_map.json" + - name: DICTIONARY_URL + value: "https://portal.pedscommons.org/api/v0/submission/_dictionary/_all" + volumeMounts: + - name: "es-dd-config-volume" + mountPath: "/tmp/es-dd-config" + args: + - /bin/bash + - -c + - | + + cd /tmp + + export PATH="/home/ubuntu/.local/bin:$PATH" + + git clone https://github.com/chicagopcdc/gen3_etl.git + echo "Repository cloned successfully." + + cd gen3_etl/elasticsearch + + pip install --user -r requirements-ES-DD.txt + + cd etl + + python create_es_dd_mapping.py add-manual-fields + + echo "ES-DD config created successfully." 
+ containers: + - name: validate-filter-sets + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + volumeMounts: + - name: "es-dd-config-volume" + mountPath: "/var/www/amanuensis/es_to_dd_map.json" + subPath: es_to_dd_map.json + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "portal-config" + readOnly: true + mountPath: "/var/www/amanuensis/gitops.json" + subPath: gitops.json + args: + - /bin/bash + - -c + - | + validate-filter-sets + restartPolicy: Never \ No newline at end of file diff --git a/helm/sower/templates/pelican-creds.yaml b/helm/sower/templates/pelican-creds.yaml index 0d3420f51..b586bac78 100644 --- a/helm/sower/templates/pelican-creds.yaml +++ b/helm/sower/templates/pelican-creds.yaml @@ -4,7 +4,6 @@ kind: Secret metadata: name: pelicanservice-g3auto type: Opaque -{{- if .Values.global.aws.enabled }} stringData: config.json: | { @@ -13,5 +12,4 @@ stringData: "aws_access_key_id": "{{ .Values.secrets.awsAccessKeyId | default .Values.global.aws.awsAccessKeyId }}", "aws_secret_access_key": "{{ .Values.secrets.awsSecretAccessKey | default .Values.global.aws.awsSecretAccessKey }}" } -{{- end }} {{- end }} \ No newline at end of file diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index d9c55f79b..a87663738 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -60,6 +60,7 @@ arborist: tag: "2025.01" amanuensis: + enabled: true image: repository: "amanuensis" tag: "test" @@ -309,6 +310,7 @@ manifestservice: tag: "2025.01" pcdcanalysistools: + enabled: true image: repository: quay.io/pcdc/pcdcanalysistools tag: "1.8.9" @@ -345,6 +347,49 @@ sower: image: repository: quay.io/cdis/sower tag: "2025.01" + pelican: + bucket: "gen3-helm-pelican-export" + # -- (map) Secret information for Usersync and External Secrets. 
+ + sowerConfig: + - name: pelican-export + action: export + container: + name: job-task + image: quay.io/pcdc/pelican:1.3.3_export + pull_policy: Always + env: + - name: DICTIONARY_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: dictionary_url + - name: GEN3_HOSTNAME + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + - name: ROOT_NODE + value: subject + volumeMounts: + - name: pelican-creds-volume + readOnly: true + mountPath: "/pelican-creds.json" + subPath: config.json + - name: peregrine-creds-volume + readOnly: true + mountPath: "/peregrine-creds.json" + subPath: creds.json + cpu-limit: '1' + memory-limit: 2Gi + volumes: + - name: pelican-creds-volume + secret: + secretName: pelicanservice-g3auto + - name: peregrine-creds-volume + secret: + secretName: peregrine-creds + restart_policy: Never wts: enabled: false @@ -352,6 +397,23 @@ wts: repository: quay.io/cdis/workspace-token-service tag: 2025.01 +elasticsearch: + clusterName: gen3-elasticsearch + maxUnavailable: 0 + singleNode: true + replicas: 1 + clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" + esConfig: + elasticsearch.yml: | + # Here we can add elasticsearch config + + resources: + requests: + cpu: 0.5 + memory: 500Mi + limits: + cpu: 1 + memory: 2Gi ######################################################################################## # DISABLED SERVICES # ######################################################################################## From f37405c2366de5ddbdcd7d4fd9565ed893aba8c0 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 17 Apr 2025 16:30:56 -0700 Subject: [PATCH 032/126] changes to work with cdis --- .gitignore | 2 +- gearbox-default-values.yaml | 13 ++++++++ ...anuensis-validate-filter-sets-cronjob.yaml | 2 +- helm/amanuensis/templates/hpa.yaml | 8 ++--- helm/amanuensis/values.yaml | 17 ++-------- helm/fence/values.yaml | 1 - helm/gearbox-middleware/values.yaml | 3 +- helm/gearbox/values.yaml | 3 +- helm/gen3/Chart.yaml | 32 +++++++++---------- helm/peregrine/templates/deployment.yaml | 3 +- pcdc-default-values.yaml | 15 +++++++-- 11 files changed, 54 insertions(+), 45 deletions(-) diff --git a/.gitignore b/.gitignore index e371729af..af3cc1070 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,4 @@ CA/ temp.yaml /values.yaml gen3_scripts/ -gen3_etl/ \ No newline at end of file +gen3_etl/ diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index ba23210bf..b739703b4 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -104,6 +104,10 @@ fence: configMap: name: "fence-yaml-merge" optional: true + - name: config-volume-public + configMap: + name: "manifest-fence" + optional: true volumeMounts: - name: "old-config-volume" @@ -150,6 +154,10 @@ fence: readOnly: true mountPath: "/fence/keys/key/jwt_private_key.pem" subPath: "jwt_private_key.pem" + - name: "config-volume-public" + readOnly: true + mountPath: "/var/www/fence/fence-config-public.yaml" + subPath: fence-config-public.yaml image: repository: quay.io/pcdc/fence @@ -379,6 +387,8 @@ fence: - privacy_policy - login_no_access - sower + + portal: image: repository: quay.io/pcdc/gearbox_fe @@ -470,4 +480,7 @@ indexd: enabled: false hatchery: + enabled: false + +cohort-middleware: enabled: false \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml index e6a2bbc19..26b233682 100644 --- 
a/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml +++ b/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml @@ -5,7 +5,7 @@ metadata: labels: redeploy-hash: "{{ .Release.Revision }}" spec: - schedule: "* * * * *" + schedule: "0 0 1 * *" concurrencyPolicy: Forbid successfulJobsHistoryLimit: 3 jobTemplate: diff --git a/helm/amanuensis/templates/hpa.yaml b/helm/amanuensis/templates/hpa.yaml index 9181d33b6..386a3d4c5 100644 --- a/helm/amanuensis/templates/hpa.yaml +++ b/helm/amanuensis/templates/hpa.yaml @@ -13,20 +13,20 @@ spec: minReplicas: {{ .Values.autoscaling.minReplicas }} maxReplicas: {{ .Values.autoscaling.maxReplicas }} metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - type: Resource resource: name: cpu target: type: Utilization averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - type: Resource resource: name: memory target: type: Utilization averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} + {{- end }} {{- end }} diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index 08b4bcb2c..5a8bae290 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -41,7 +41,8 @@ global: # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. tierAccessLimit: 1000 # -- (bool) Whether network policies are enabled. - netPolicy: true + netPolicy: + enabled: false # -- (int) Number of dispatcher jobs. dispatcherJobNum: 10 # -- (bool) Whether Datadog is enabled. @@ -51,20 +52,6 @@ global: # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. minAvialable: 1 - postgres: - # -- (bool) Whether the database should be created. - dbCreate: true - # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres - master: - # -- (string) hostname of postgres server - host: - # -- (string) username of superuser in postgres. This is used to create or restore databases - username: postgres - # -- (string) password for superuser in postgres. This is used to create or restore databases - password: - # -- (string) Port for Postgres. - port: "5432" - postgres: # (bool) Whether the database should be restored from s3. Default to global.postgres.dbRestore dbRestore: false diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index d4fddd8b8..e4ad3ec70 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -433,7 +433,6 @@ volumeMounts: readOnly: true mountPath: "/amanuensis/jwt_public_key.pem" subPath: "jwt_public_key.pem" - - name: "config-volume-public" readOnly: true mountPath: "/var/www/fence/fence-config-public.yaml" diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index 134ff94bc..6868dc575 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -41,7 +41,8 @@ global: # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. tierAccessLimit: 1000 # -- (bool) Whether network policies are enabled. 
- netPolicy: true + netPolicy: + enabled: false # -- (int) Number of dispatcher jobs. dispatcherJobNum: 10 # -- (bool) Whether Datadog is enabled. diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index d993b9dd5..891cf8d07 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -41,7 +41,8 @@ global: # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. tierAccessLimit: 1000 # -- (bool) Whether network policies are enabled. - netPolicy: true + netPolicy: + enabled: false # -- (int) Number of dispatcher jobs. dispatcherJobNum: 10 # -- (bool) Whether Datadog is enabled. diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 592fa818c..324cde95a 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -95,22 +95,22 @@ dependencies: version: 0.1.19 repository: "file://../wts" condition: wts.enabled -- name: pcdcanalysistools - version: "0.1.0" - repository: "file://../pcdcanalysistools" - condition: pcdcanalysistools.enabled -- name: amanuensis - version: "0.1.1" - repository: "file://../amanuensis" - condition: amanuensis.enabled -- name: gearbox - version: "0.1.0" - repository: "file://../gearbox" - condition: gearbox.enabled -- name: gearbox-middleware - version: "0.1.0" - repository: "file://../gearbox-middleware" - condition: gearbox-middleware.enabled + - name: pcdcanalysistools + version: "0.1.0" + repository: "file://../pcdcanalysistools" + condition: pcdcanalysistools.enabled + - name: amanuensis + version: "0.1.1" + repository: "file://../amanuensis" + condition: amanuensis.enabled + - name: gearbox + version: "0.1.0" + repository: "file://../gearbox" + condition: gearbox.enabled + - name: gearbox-middleware + version: "0.1.0" + repository: "file://../gearbox-middleware" + condition: gearbox-middleware.enabled - name: gen3-network-policies version: 0.1.2 diff --git a/helm/peregrine/templates/deployment.yaml b/helm/peregrine/templates/deployment.yaml index ebc6aca00..4c9cdfa71 100644 --- a/helm/peregrine/templates/deployment.yaml +++ b/helm/peregrine/templates/deployment.yaml @@ -84,7 +84,7 @@ spec: secretKeyRef: name: indexd-service-creds key: sheepdog - optional: false + optional: true - name: PGHOST valueFrom: secretKeyRef: @@ -153,7 +153,6 @@ spec: value: "False" - name: CONF_HOSTNAME value: {{ .Values.global.hostname | quote }} - {{- with .Values.volumeMounts }} volumeMounts: - name: "config-volume" readOnly: true diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index a87663738..f495c6953 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -62,9 +62,9 @@ arborist: amanuensis: enabled: true image: - repository: "amanuensis" - tag: "test" - pullPolicy: Never + repository: "quay.io/pcdc/amanuensis" + tag: "2.24.0" + pullPolicy: IfNotPresent fence: FENCE_CONFIG: @@ -397,6 +397,12 @@ wts: repository: quay.io/cdis/workspace-token-service tag: 2025.01 +postgresql: + primary: + persistence: + # -- (bool) Option to persist the dbs data. 
+ enabled: false + elasticsearch: clusterName: gen3-elasticsearch maxUnavailable: 0 @@ -451,4 +457,7 @@ gearbox: enabled: false gearbox-middleware: + enabled: false + +cohort-middleware: enabled: false \ No newline at end of file From 2555048e87b51a2cae815c3839a6dbc44202226d Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 29 Apr 2025 17:12:06 -0700 Subject: [PATCH 033/126] fix gearbox config not being created on later revisions --- .../templates/create-gearbox-config.yaml | 10 +++++++++- helm/gearbox/templates/gearbox-secret.yaml | 19 +++++++++++++------ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/helm/gearbox/templates/create-gearbox-config.yaml b/helm/gearbox/templates/create-gearbox-config.yaml index 19f3a9864..776b03718 100644 --- a/helm/gearbox/templates/create-gearbox-config.yaml +++ b/helm/gearbox/templates/create-gearbox-config.yaml @@ -27,10 +27,17 @@ roleRef: name: {{ .Chart.Name }}-secret-patch-role apiGroup: rbac.authorization.k8s.io --- +{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-g3auto" }} +{{- $shouldRunJob := true }} +{{- if and $existingSecret (index $existingSecret.data "secretready") }} + {{- $shouldRunJob = false }} +{{- end }} + +{{- if $shouldRunJob }} apiVersion: batch/v1 kind: Job metadata: - name: gearbox-g3auto-patch + name: gearbox-g3auto-patch-{{ .Release.Revision }} spec: template: metadata: @@ -65,5 +72,6 @@ spec: fi done restartPolicy: Never +{{- end }} diff --git a/helm/gearbox/templates/gearbox-secret.yaml b/helm/gearbox/templates/gearbox-secret.yaml index 4fc33dd81..be60699e1 100644 --- a/helm/gearbox/templates/gearbox-secret.yaml +++ b/helm/gearbox/templates/gearbox-secret.yaml @@ -4,16 +4,23 @@ metadata: name: gearbox-g3auto type: Opaque stringData: - {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} - base64Authz.txt: {{ $randomPass | quote | b64enc }} + {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-g3auto" }} + {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} + base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ index $existingSecret.data "base64Authz.txt" | b64dec | quote }}{{ else }}{{ $randomPass | quote | b64enc }}{{ end }} gearbox.env: | DEBUG=0 FORCE_ISSUER=True USER_API="http://fence-service/" ALLOWED_ISSUERS="http://fence-service/,https://localhost/user" DUMMY_S3=True - DB_DATABASE={{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }} - DB_HOST={{ (printf "%s-%s" $.Release.Name "postgresql" ) }} - DB_USER={{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }} - ADMIN_LOGINS={{ $randomPass }} + DB_DATABASE={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_DATABASE=(.*)" | quote }}{{ else }}{{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }}{{ end }} + DB_HOST={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_HOST=(.*)" | quote }}{{ else }}{{ (printf "%s-%s" $.Release.Name "postgresql" ) }}{{ end }} + DB_USER={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_USER=(.*)" | quote }}{{ else }}{{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }}{{ end }} + ADMIN_LOGINS={{ if and 
$existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "ADMIN_LOGINS=(.*)" | quote }}{{ else }}{{ $randomPass }}{{ end }} ENABLE_PHI=True + {{- if and $existingSecret (index $existingSecret.data "gearbox.env") (index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_PASSWORD=(.*)") }} + DB_PASSWORD={{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_PASSWORD=(.*)" | quote }} + {{- end }} + {{- if and $existingSecret (index $existingSecret.data "secretReady") }} + secretReady: {{ index $existingSecret.data "secretReady" | b64dec | quote }} + {{- end }} \ No newline at end of file From cfd6a109203d25881b9700426923fb65deec898c Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 29 Apr 2025 17:12:23 -0700 Subject: [PATCH 034/126] dont run jwt key job every revision --- helm/common/templates/_jwt_key_pairs.tpl | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/helm/common/templates/_jwt_key_pairs.tpl b/helm/common/templates/_jwt_key_pairs.tpl index 4f6ac3b84..809be5a8d 100644 --- a/helm/common/templates/_jwt_key_pairs.tpl +++ b/helm/common/templates/_jwt_key_pairs.tpl @@ -1,5 +1,4 @@ {{- define "common.jwt_public_key_setup_sa" -}} - apiVersion: v1 kind: ServiceAccount metadata: @@ -30,13 +29,18 @@ roleRef: kind: Role name: {{ .Chart.Name }}-jwt-public-key-patch-role apiGroup: rbac.authorization.k8s.io - - {{- end }} --- {{- define "common.create_public_key_job" -}} +{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-keys" .Chart.Name) }} +{{- $shouldRunJob := true }} +{{- if and $existingSecret (index $existingSecret.data "jwt_public_key.pem") }} + {{- $shouldRunJob = false }} +{{- end }} + +{{- if $shouldRunJob }} apiVersion: batch/v1 kind: Job metadata: @@ -83,12 +87,10 @@ spec: restartPolicy: OnFailure {{- end }} +{{- end }} + --- -{{/* -Create k8s secrets for creating jwt key pairs -*/}} -# JWT key Secrets {{- define "common.jwt-key-pair-secret" -}} apiVersion: v1 kind: Secret @@ -98,10 +100,11 @@ metadata: helm.sh/resource-policy: keep type: Opaque data: - {{- if (lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-keys" .Chart.Name)) }} - # Secret exists - don't regenerate the private key {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-keys" .Chart.Name)) }} + {{- if $existingSecret }} + # Secret exists - don't regenerate the keys jwt_private_key.pem: {{ index $existingSecret.data "jwt_private_key.pem" | quote }} + jwt_public_key.pem: {{ index $existingSecret.data "jwt_public_key.pem" | quote }} {{- else }} # Secret doesn't exist yet - generate a new key jwt_private_key.pem: {{ genPrivateKey "rsa" | b64enc | quote }} From dfd0f7c5039aa3d61408dd38b0f52ebe5ac3e470 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 29 Apr 2025 17:12:44 -0700 Subject: [PATCH 035/126] remove db creds from gearbox env variables --- helm/gearbox/values.yaml | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index 891cf8d07..6dc91e2fb 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -142,36 +142,6 @@ env: value: http://esproxy-service:9200 - name: AWS_REGION value: "us-east-1" - - name: PGHOST - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: host - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: username - optional: false - - name: PGPASSWORD - 
valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: password - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: database - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: dbcreated - optional: false - name: GB_SECRET_READY valueFrom: secretKeyRef: From 7713506ee90147caa026d2d060f4ae1f427234b6 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 29 Apr 2025 17:13:13 -0700 Subject: [PATCH 036/126] update pelican env variable to include output file format in pcdc-default-values.yaml --- pcdc-default-values.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index f495c6953..dacb427e9 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -371,6 +371,8 @@ sower: key: hostname - name: ROOT_NODE value: subject + - name: OUTPUT_FILE_FORMAT + value: ZIP volumeMounts: - name: pelican-creds-volume readOnly: true From 8c5fca169431c9cc7830194904e718fd75bfff17 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Apr 2025 16:22:24 -0700 Subject: [PATCH 037/126] run fence as non root user --- gearbox-default-values.yaml | 21 +++++++++++++++++---- helm/fence/templates/fence-deployment.yaml | 2 ++ helm/fence/templates/jwt-keys.yaml | 14 +++++--------- openshift.code-workspace | 11 +++++++++++ 4 files changed, 35 insertions(+), 13 deletions(-) create mode 100644 openshift.code-workspace diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index b739703b4..35d0abfcb 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -107,7 +107,7 @@ fence: - name: config-volume-public configMap: name: "manifest-fence" - optional: true + optional: true volumeMounts: - name: "old-config-volume" @@ -154,11 +154,22 @@ fence: readOnly: true mountPath: "/fence/keys/key/jwt_private_key.pem" subPath: "jwt_private_key.pem" + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_public_key.pem" + subPath: "jwt_public_key.pem" - name: "config-volume-public" readOnly: true mountPath: "/var/www/fence/fence-config-public.yaml" subPath: fence-config-public.yaml - + + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + # securityContext: + # allowPrivilegeEscalation: false + # runAsNonRoot: true image: repository: quay.io/pcdc/fence tag: "helm-test" @@ -390,6 +401,7 @@ fence: portal: + enabled: false image: repository: quay.io/pcdc/gearbox_fe tag: "dev" @@ -405,19 +417,20 @@ portal: gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" revproxy: + enabled: true image: repository: quay.io/cdis/nginx tag: 2023.09 gearbox: - enabled: true + enabled: false image: repository: quay.io/pcdc/gearbox_be tag: 1.3.0 pullPolicy: Always gearbox-middleware: - enabled: true + enabled: false image: repository: quay.io/pcdc/gearbox-middleware tag: "helm-test" diff --git a/helm/fence/templates/fence-deployment.yaml b/helm/fence/templates/fence-deployment.yaml index 1b24c149d..e11d4729f 100644 --- a/helm/fence/templates/fence-deployment.yaml +++ b/helm/fence/templates/fence-deployment.yaml @@ -30,6 +30,8 @@ spec: {{- include "common.extraLabels" . | nindent 8 }} spec: enableServiceLinks: false + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} serviceAccountName: {{ include "fence.serviceAccountName" . 
}} volumes: {{- toYaml .Values.volumes | nindent 8 }} diff --git a/helm/fence/templates/jwt-keys.yaml b/helm/fence/templates/jwt-keys.yaml index 06d10f288..322abbf5b 100644 --- a/helm/fence/templates/jwt-keys.yaml +++ b/helm/fence/templates/jwt-keys.yaml @@ -1,9 +1,5 @@ -{{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sJwtKeysSecret) }} -apiVersion: v1 -kind: Secret -metadata: - name: fence-jwt-keys -type: Opaque -data: - jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . }} -{{- end }} \ No newline at end of file +{{include "common.jwt-key-pair-secret" .}} +--- +{{include "common.jwt_public_key_setup_sa" .}} +--- +{{include "common.create_public_key_job" .}} \ No newline at end of file diff --git a/openshift.code-workspace b/openshift.code-workspace new file mode 100644 index 000000000..a6d1847f4 --- /dev/null +++ b/openshift.code-workspace @@ -0,0 +1,11 @@ +{ + "folders": [ + { + "path": "." + }, + { + "path": "../fence" + } + ], + "settings": {} +} \ No newline at end of file From 018cf705e87e616b13733c86d91b37fded3100a0 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 1 May 2025 16:03:33 -0700 Subject: [PATCH 038/126] update useryaml arguments --- helm/fence/templates/useryaml-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/fence/templates/useryaml-job.yaml b/helm/fence/templates/useryaml-job.yaml index eb7818556..658839fbb 100644 --- a/helm/fence/templates/useryaml-job.yaml +++ b/helm/fence/templates/useryaml-job.yaml @@ -44,7 +44,7 @@ spec: - "-c" # Script always succeeds if it runs (echo exits with 0) - | - pip3 install SQLAlchemy==1.3.6 + # pip3 install SQLAlchemy==1.3.6 # can be removed once this is merged: https://github.com/uc-cdis/fence/pull/1096 fence-create sync --arborist http://arborist-service --yaml /var/www/fence/user.yaml restartPolicy: OnFailure From 10b2379b480620a4ae14f8a1d742263dc4ec79ff Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 2 May 2025 11:43:30 -0700 Subject: [PATCH 039/126] fix gearbox services back to enabled = true --- gearbox-default-values.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 35d0abfcb..bc1d91fee 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -401,7 +401,7 @@ fence: portal: - enabled: false + enabled: true image: repository: quay.io/pcdc/gearbox_fe tag: "dev" @@ -423,14 +423,14 @@ revproxy: tag: 2023.09 gearbox: - enabled: false + enabled: true image: repository: quay.io/pcdc/gearbox_be tag: 1.3.0 pullPolicy: Always gearbox-middleware: - enabled: false + enabled: true image: repository: quay.io/pcdc/gearbox-middleware tag: "helm-test" From 082aa193c6a8122b298ab302f25416976b6b0c67 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 2 May 2025 12:52:39 -0700 Subject: [PATCH 040/126] bug fix running fence as non root user --- gearbox-default-values.yaml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index bc1d91fee..ae39b0da3 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -163,6 +163,31 @@ fence: mountPath: "/var/www/fence/fence-config-public.yaml" subPath: fence-config-public.yaml + # -- (list) Volumes to mount to the init container. 
+ + initVolumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "config-volume-public" + readOnly: true + mountPath: "/var/www/fence/fence-config-public.yaml" + subPath: fence-config-public.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + + podSecurityContext: runAsNonRoot: true runAsUser: 1000 From 7ed3c3bc4cb35c7970a3b263915335f9ff77c7ca Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 30 May 2025 17:57:41 -0700 Subject: [PATCH 041/126] updates --- ...anuensis-validate-filter-sets-cronjob.yaml | 92 ---- .../amanuensis-validate-filter-sets-job.yaml | 92 ++++ helm/amanuensis/templates/deployment.yaml | 3 +- helm/common/templates/_jwt_key_pairs.tpl | 2 - helm/gen3/confighelper/config_helper.py | 486 ++++++++++++++++++ helm/gen3/templates/config-helper.yaml | 6 + .../pcdcanalysistools-secret/confighelper.py | 452 +++++++++++++++- .../pcdcanalysistools-secret/settings.py | 4 +- .../templates/deployment.yaml | 32 +- pelican.yaml | 52 ++ 10 files changed, 1100 insertions(+), 121 deletions(-) delete mode 100644 helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml create mode 100644 helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml create mode 100644 helm/gen3/confighelper/config_helper.py create mode 100644 helm/gen3/templates/config-helper.yaml create mode 100644 pelican.yaml diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml deleted file mode 100644 index 26b233682..000000000 --- a/helm/amanuensis/templates/amanuensis-validate-filter-sets-cronjob.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: amanuensis-validate-filter-sets - labels: - redeploy-hash: "{{ .Release.Revision }}" -spec: - schedule: "0 0 1 * *" - concurrencyPolicy: Forbid - successfulJobsHistoryLimit: 3 - jobTemplate: - spec: - template: - metadata: - labels: - app: gen3job - spec: - automountServiceAccountToken: false - volumes: - - name: config-volume - secret: - secretName: "amanuensis-config" - - name: es-dd-config-volume - emptyDir: {} - - name: portal-config - secret: - secretName: "portal-config" - initContainers: - - name: amanuensis-db-filter-set-filler - image: "quay.io/pcdc/amanuensis-db-filter-set-filler:main" - imagePullPolicy: IfNotPresent - volumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/amanuensis/amanuensis-config.yaml" - subPath: amanuensis-config.yaml - - name: create-es-dd-config - image: "quay.io/cdis/awshelper:stable" - imagePullPolicy: IfNotPresent - env: - - name: BASE_URL - value: "https://portal.pedscommons.org/" - - name: OUTPUT_FILE - value: "/tmp/es-dd-config/es_to_dd_map.json" - - name: DICTIONARY_URL - value: "https://portal.pedscommons.org/api/v0/submission/_dictionary/_all" - volumeMounts: - - name: "es-dd-config-volume" - mountPath: "/tmp/es-dd-config" - args: - - /bin/bash - - -c - - | - - cd /tmp - - export PATH="/home/ubuntu/.local/bin:$PATH" - - git clone 
https://github.com/chicagopcdc/gen3_etl.git - echo "Repository cloned successfully." - - cd gen3_etl/elasticsearch - - pip install --user -r requirements-ES-DD.txt - - cd etl - - python create_es_dd_mapping.py add-manual-fields - - echo "ES-DD config created successfully." - containers: - - name: validate-filter-sets - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - volumeMounts: - - name: "es-dd-config-volume" - mountPath: "/var/www/amanuensis/es_to_dd_map.json" - subPath: es_to_dd_map.json - - name: "config-volume" - readOnly: true - mountPath: "/var/www/amanuensis/amanuensis-config.yaml" - subPath: amanuensis-config.yaml - - name: "portal-config" - readOnly: true - mountPath: "/var/www/amanuensis/gitops.json" - subPath: gitops.json - args: - - /bin/bash - - -c - - | - validate-filter-sets - restartPolicy: Never \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml new file mode 100644 index 000000000..376150a31 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml @@ -0,0 +1,92 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: amanuensis-validate-filter-sets + labels: + redeploy-hash: "{{ .Release.Revision }}" +spec: + # Job spec starts here directly (no schedule or jobTemplate needed) + template: + metadata: + labels: + app: gen3job + spec: + automountServiceAccountToken: false + volumes: + - name: config-volume + secret: + secretName: "amanuensis-config" + - name: es-dd-config-volume + emptyDir: {} + - name: portal-config + secret: + secretName: "portal-config" + - name: invalid-filter-set-volume + emptyDir: {} + initContainers: + - name: amanuensis-db-filter-set-filler + image: "amanuensis-db-filter-set-filler:test" + imagePullPolicy: Never + env: + - name: invalid_filters_list_path + value: "/var/www/amanuensis/invalid-filters/invalid-filters.json" + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "invalid-filter-set-volume" + mountPath: "/var/www/amanuensis/invalid-filters/" + - name: create-es-dd-config + image: "quay.io/cdis/awshelper:stable" + imagePullPolicy: IfNotPresent + env: + - name: BASE_URL + value: "https://portal.pedscommons.org/" + - name: OUTPUT_FILE + value: "/tmp/es-dd-config/es_to_dd_map.json" + - name: DICTIONARY_URL + value: "https://portal.pedscommons.org/api/v0/submission/_dictionary/_all" + volumeMounts: + - name: "es-dd-config-volume" + mountPath: "/tmp/es-dd-config" + args: + - /bin/bash + - -c + - | + cd /tmp + export PATH="/home/ubuntu/.local/bin:$PATH" + git clone https://github.com/chicagopcdc/gen3_etl.git + echo "Repository cloned successfully." + cd gen3_etl/elasticsearch + pip install -r requirements-ES-DD.txt + cd etl + python create_es_dd_mapping.py add-manual-fields + echo "ES-DD config created successfully." 
+ containers: + - name: validate-filter-sets + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + volumeMounts: + - name: "es-dd-config-volume" + mountPath: "/var/www/amanuensis/es_to_dd_map.json" + subPath: es_to_dd_map.json + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "portal-config" + readOnly: true + mountPath: "/var/www/amanuensis/gitops.json" + subPath: gitops.json + - name: "invalid-filter-set-volume" + mountPath: "/var/www/amanuensis/invalid-filters.json" + subPath: invalid-filters.json + args: + - /bin/bash + - -c + - | + validate-filter-sets + restartPolicy: Never + # Optional: add a backoff limit to control retries + backoffLimit: 3 \ No newline at end of file diff --git a/helm/amanuensis/templates/deployment.yaml b/helm/amanuensis/templates/deployment.yaml index 33761c5ec..a47e7b6a3 100644 --- a/helm/amanuensis/templates/deployment.yaml +++ b/helm/amanuensis/templates/deployment.yaml @@ -64,8 +64,7 @@ spec: - | echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml" python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml - #bash /amanuensis/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then /dockerrun.sh; fi - bash /dockerrun.sh + if [[ -f /amanuensis/dockerrun.bash ]]; then bash /amanuensis/dockerrun.bash; elif [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; else echo 'Error: Neither /amanuensis/dockerrun.bash nor /dockerrun.sh exists.' >&2; exit 1; fi env: {{- if .Values.global.ddEnabled }} {{- include "common.datadogEnvVar" . 
| nindent 12 }} diff --git a/helm/common/templates/_jwt_key_pairs.tpl b/helm/common/templates/_jwt_key_pairs.tpl index 809be5a8d..89bc22d61 100644 --- a/helm/common/templates/_jwt_key_pairs.tpl +++ b/helm/common/templates/_jwt_key_pairs.tpl @@ -96,8 +96,6 @@ apiVersion: v1 kind: Secret metadata: name: {{ $.Chart.Name }}-jwt-keys - annotations: - helm.sh/resource-policy: keep type: Opaque data: {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-keys" .Chart.Name)) }} diff --git a/helm/gen3/confighelper/config_helper.py b/helm/gen3/confighelper/config_helper.py new file mode 100644 index 000000000..869ca25af --- /dev/null +++ b/helm/gen3/confighelper/config_helper.py @@ -0,0 +1,486 @@ +import json +import os +import copy +import argparse +import re +import types + +# +# make it easy to change this for testing +XDG_DATA_HOME = os.getenv("XDG_DATA_HOME", "/usr/share/") + + +def default_search_folders(app_name): + """ + Return the list of folders to search for configuration files + """ + return [ + "%s/cdis/%s" % (XDG_DATA_HOME, app_name), + "/usr/share/cdis/%s" % app_name, + "%s/gen3/%s" % (XDG_DATA_HOME, app_name), + "/usr/share/gen3/%s" % app_name, + "/var/www/%s" % app_name, + "/etc/gen3/%s" % app_name, + ] + + +def find_paths(file_name, app_name, search_folders=None): + """ + Search the given folders for file_name + search_folders defaults to default_search_folders if not specified + return the first path to file_name found + """ + search_folders = search_folders or default_search_folders(app_name) + possible_files = [os.path.join(folder, file_name) for folder in search_folders] + return [path for path in possible_files if os.path.exists(path)] + + +def load_json(file_name, app_name, search_folders=None): + """ + json.load(file_name) after finding file_name in search_folders + + return the loaded json data or None if file not found + """ + actual_files = find_paths(file_name, app_name, search_folders) + if not actual_files: + return None + with open(actual_files[0], "r") as reader: + return json.load(reader) + + +def inject_creds_into_fence_config(creds_file_path, config_file_path): + creds_file = open(creds_file_path, "r") + creds = json.load(creds_file) + creds_file.close() + + # get secret values from creds.json file + db_host = _get_nested_value(creds, "db_host") + db_username = _get_nested_value(creds, "db_username") + db_password = _get_nested_value(creds, "db_password") + db_database = _get_nested_value(creds, "db_database") + hostname = _get_nested_value(creds, "hostname") + indexd_password = _get_nested_value(creds, "indexd_password") + google_client_secret = _get_nested_value(creds, "google_client_secret") + google_client_id = _get_nested_value(creds, "google_client_id") + hmac_key = _get_nested_value(creds, "hmac_key") + db_path = "postgresql://{}:{}@{}:5432/{}".format( + db_username, db_password, db_host, db_database + ) + + config_file = open(config_file_path, "r").read() + + print(" DB injected with value(s) from creds.json") + config_file = _replace(config_file, "DB", db_path) + + print(" BASE_URL injected with value(s) from creds.json") + config_file = _replace(config_file, "BASE_URL", "https://{}/user".format(hostname)) + + print(" INDEXD_PASSWORD injected with value(s) from creds.json") + config_file = _replace(config_file, "INDEXD_PASSWORD", indexd_password) + config_file = _replace(config_file, "INDEXD_USERNAME", "fence") + + print(" ENCRYPTION_KEY injected with value(s) from creds.json") + config_file = _replace(config_file, 
"ENCRYPTION_KEY", hmac_key) + + print( + " OPENID_CONNECT/google/client_secret injected with value(s) " + "from creds.json" + ) + config_file = _replace( + config_file, "OPENID_CONNECT/google/client_secret", google_client_secret + ) + + print(" OPENID_CONNECT/google/client_id injected with value(s) from creds.json") + config_file = _replace( + config_file, "OPENID_CONNECT/google/client_id", google_client_id + ) + + open(config_file_path, "w+").write(config_file) + +def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): + creds_file = open(creds_file_path, "r") + creds = json.load(creds_file) + creds_file.close() + + # get secret values from creds.json file + db_host = _get_nested_value(creds, "db_host") + db_username = _get_nested_value(creds, "db_username") + db_password = _get_nested_value(creds, "db_password") + db_database = _get_nested_value(creds, "db_database") + hostname = _get_nested_value(creds, "hostname") + data_delivery_bucket = _get_nested_value(creds, "data_delivery_bucket") + data_delivery_bucket_aws_key_id = _get_nested_value(creds, "data_delivery_bucket_aws_key_id") + data_delivery_bucket_aws_access_key = _get_nested_value(creds, "data_delivery_bucket_aws_access_key") + csl_key = _get_nested_value(creds, "csl_key") + + db_path = "postgresql://{}:{}@{}:5432/{}".format( + db_username, db_password, db_host, db_database + ) + + config_file = open(config_file_path, "r").read() + + print(" DB injected with value(s) from creds.json") + config_file = _replace(config_file, "DB", db_path) + + print(" BASE_URL injected with value(s) from creds.json") + config_file = _replace(config_file, "BASE_URL", "https://{}/amanuensis".format(hostname)) + + print(" HOSTNAME injected with value(s) from creds.json") + config_file = _replace(config_file, "HOSTNAME", "{}".format(hostname)) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id", data_delivery_bucket_aws_key_id + ) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key", data_delivery_bucket_aws_access_key + ) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name", data_delivery_bucket + ) + + print(" CSL_KEY injected with value(s) from creds.json") + config_file = _replace( + config_file, "CSL_KEY", csl_key + ) + + # modify USER_API to http://user-service/ if hostname is localhost + + if hostname == "localhost": + print(" USER_API set to http://fence-service/") + config_file = _replace(config_file, "USER_API", "http://fence-service/") + # print(" ENCRYPTION_KEY injected with value(s) from creds.json") + # config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) + + + open(config_file_path, "w+").write(config_file) + + +def set_prod_defaults(config_file_path): + config_file = open(config_file_path, "r").read() + + print( + " CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS set as " + "var/www/fence/fence_google_app_creds_secret.json" + ) + config_file = _replace( + config_file, + "CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS", + "/var/www/fence/fence_google_app_creds_secret.json", + ) + + print( + " CIRRUS_CFG/GOOGLE_STORAGE_CREDS set as " + 
"var/www/fence/fence_google_storage_creds_secret.json" + ) + config_file = _replace( + config_file, + "CIRRUS_CFG/GOOGLE_STORAGE_CREDS", + "/var/www/fence/fence_google_storage_creds_secret.json", + ) + + print(" INDEXD set as http://indexd-service/") + config_file = _replace(config_file, "INDEXD", "http://indexd-service/") + + print(" ARBORIST set as http://arborist-service/") + config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") + + print(" HTTP_PROXY/host set as cloud-proxy.internal.io") + config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") + + print(" HTTP_PROXY/port set as 3128") + config_file = _replace(config_file, "HTTP_PROXY/port", 3128) + + print(" DEBUG set to false") + config_file = _replace(config_file, "DEBUG", False) + + print(" MOCK_AUTH set to false") + config_file = _replace(config_file, "MOCK_AUTH", False) + + print(" MOCK_GOOGLE_AUTH set to false") + config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) + + print(" AUTHLIB_INSECURE_TRANSPORT set to true") + config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) + + print(" SESSION_COOKIE_SECURE set to true") + config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) + + print(" ENABLE_CSRF_PROTECTION set to true") + config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) + + open(config_file_path, "w+").write(config_file) + +def set_prod_defaults_amanuensis(config_file_path): + config_file = open(config_file_path, "r").read() + + print(" INDEXD set as http://indexd-service/") + config_file = _replace(config_file, "INDEXD", "http://indexd-service/") + + print(" ARBORIST set as http://arborist-service/") + config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") + + print(" HTTP_PROXY/host set as cloud-proxy.internal.io") + config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") + + print(" HTTP_PROXY/port set as 3128") + config_file = _replace(config_file, "HTTP_PROXY/port", 3128) + + print(" DEBUG set to false") + config_file = _replace(config_file, "DEBUG", False) + + print(" MOCK_AUTH set to false") + config_file = _replace(config_file, "MOCK_AUTH", False) + + print(" MOCK_GOOGLE_AUTH set to false") + config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) + + print(" AUTHLIB_INSECURE_TRANSPORT set to true") + config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) + + print(" SESSION_COOKIE_SECURE set to true") + config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) + + print(" ENABLE_CSRF_PROTECTION set to true") + config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) + + open(config_file_path, "w+").write(config_file) + +def inject_other_files_into_fence_config(other_files, config_file_path): + additional_cfgs = _get_all_additional_configs(other_files) + + config_file = open(config_file_path, "r").read() + + for key, value in additional_cfgs.iteritems(): + print(" {} set to {}".format(key, value)) + config_file = _nested_replace(config_file, key, value) + + open(config_file_path, "w+").write(config_file) + + +def _get_all_additional_configs(other_files): + """ + Attempt to parse given list of files and extract configuration variables and values + """ + additional_configs = dict() + for file_path in other_files: + try: + file_ext = file_path.strip().split(".")[-1] + if file_ext == "json": + json_file = open(file_path, "r") + configs = json.load(json_file) + json_file.close() + elif file_ext == "py": + configs = 
from_pyfile(file_path) + else: + print( + "Cannot load config vars from a file with extention: {}".format( + file_ext + ) + ) + except Exception as exc: + # if there's any issue reading the file, exit + print( + "Error reading {}. Cannot get configuration. Skipping this file. " + "Details: {}".format(other_files, str(exc)) + ) + continue + + if configs: + additional_configs.update(configs) + + return additional_configs + + +def _nested_replace(config_file, key, value, replacement_path=None): + replacement_path = replacement_path or key + try: + for inner_key, inner_value in value.iteritems(): + temp_path = replacement_path + temp_path = temp_path + "/" + inner_key + config_file = _nested_replace( + config_file, inner_key, inner_value, temp_path + ) + except AttributeError: + # not a dict so replace + if value is not None: + config_file = _replace(config_file, replacement_path, value) + + return config_file + + +def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0, key_only=False): + """ + Replace a nested value in a YAML file string with the given value without + losing comments. Uses a regex to do the replacement. + + Args: + yaml_config (str): a string representing a full configuration file + path_to_key (str): nested/path/to/key. The value of this key will be + replaced + replacement_value (str): Replacement value for the key from + path_to_key + """ + nested_path_to_replace = path_to_key.split("/") + + # our regex looks for a specific number of spaces to ensure correct + # level of nesting. It matches to the end of the line + search_string = ( + " " * nested_level + ".*" + nested_path_to_replace[0] + "(')?(\")?:.*\n" + ) + matches = re.search(search_string, yaml_config[start:]) + + # early return if we haven't found anything + if not matches: + return yaml_config + + # if we're on the last item in the path, we need to get the value and + # replace it in the original file + if len(nested_path_to_replace) == 1: + # replace the current key:value with the new replacement value + match_start = start + matches.start(0) + len(" " * nested_level) + match_end = start + matches.end(0) + if not key_only: + yaml_config = ( + yaml_config[:match_start] + + "{}: {}\n".format( + nested_path_to_replace[0], + _get_yaml_replacement_value(replacement_value, nested_level), + ) + + yaml_config[match_end:] + ) + else: + yaml_config = ( + yaml_config[:match_start] + + "{}:\n".format( + _get_yaml_replacement_value(replacement_value, nested_level), + ) + + yaml_config[match_end:] + ) + + return yaml_config + + # set new start point to past current match and move on to next match + start = start + matches.end(0) + nested_level += 1 + del nested_path_to_replace[0] + + return _replace( + yaml_config, + "/".join(nested_path_to_replace), + replacement_value, + start, + nested_level, + key_only=key_only, + ) + + +def from_pyfile(filename, silent=False): + """ + Modeled after flask's ability to load in python files: + https://github.com/pallets/flask/blob/master/flask/config.py + + Some alterations were made but logic is essentially the same + """ + filename = os.path.abspath(filename) + d = types.ModuleType("config") + d.__file__ = filename + try: + with open(filename, mode="rb") as config_file: + exec(compile(config_file.read(), filename, "exec"), d.__dict__) + except IOError as e: + print("Unable to load configuration file ({})".format(e.strerror)) + if silent: + return False + raise + return _from_object(d) + + +def _from_object(obj): + configs = {} + for key in dir(obj): + if 
key.isupper(): + configs[key] = getattr(obj, key) + return configs + + +def _get_yaml_replacement_value(value, nested_level=0): + if isinstance(value, str): + return "'" + value + "'" + elif isinstance(value, bool): + return str(value).lower() + elif isinstance(value, list) or isinstance(value, set): + output = "" + for item in value: + # spaces for nested level then spaces and hyphen for each list item + output += ( + "\n" + + " " * nested_level + + " - " + + _get_yaml_replacement_value(item) + + "" + ) + return output + else: + return value + + +def _get_nested_value(dictionary, nested_path): + """ + Return a value from a dictionary given a path-like nesting of keys. + + Will default to an empty string if value cannot be found. + + Args: + dictionary (dict): a dictionary + nested_path (str): nested/path/to/key + + Returns: + ?: Value from dict + """ + replacement_value_path = nested_path.split("/") + replacement_value = copy.deepcopy(dictionary) + + for item in replacement_value_path: + replacement_value = replacement_value.get(item, {}) + + if replacement_value == {}: + replacement_value = "" + + return replacement_value + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", + "--creds_file_to_inject", + default="creds.json", + help="creds file to inject into the configuration yaml", + ) + parser.add_argument( + "--other_files_to_inject", + nargs="+", + help="fence_credentials.json, local_settings.py, fence_settings.py file(s) to " + "inject into the configuration yaml", + ) + parser.add_argument( + "-c", "--config_file", default="config.yaml", help="configuration yaml" + ) + args = parser.parse_args() + + if args.config_file == "new-amanuensis-config.yaml": + inject_creds_into_amanuensis_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults_amanuensis(args.config_file) + else: + inject_creds_into_fence_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults(args.config_file) + + if args.other_files_to_inject: + inject_other_files_into_fence_config( + args.other_files_to_inject, args.config_file + ) diff --git a/helm/gen3/templates/config-helper.yaml b/helm/gen3/templates/config-helper.yaml new file mode 100644 index 000000000..fe20be2f2 --- /dev/null +++ b/helm/gen3/templates/config-helper.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-helper +data: +{{ (.Files.Glob "confighelper/*").AsConfig | indent 2 }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py index ad7b8d697..869ca25af 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py @@ -1,15 +1,9 @@ -""" -Originally copied from `cloud-automation/apis_configs/config_helper.py` -(renamed `confighelper.py` so it isn't overwritten by the file that cloud-automation -still mounts for backwards compatibility). 
- -TODO: once everyone has this independent version of PcdcAnalysisTools, remove `wsgi.py` and -`config_helper.py` here: -https://github.com/uc-cdis/cloud-automation/blob/afb750d/kube/services/PcdcAnalysisTools/PcdcAnalysisTools-deploy.yaml#L166-L177 -""" - import json import os +import copy +import argparse +import re +import types # # make it easy to change this for testing @@ -52,3 +46,441 @@ def load_json(file_name, app_name, search_folders=None): return None with open(actual_files[0], "r") as reader: return json.load(reader) + + +def inject_creds_into_fence_config(creds_file_path, config_file_path): + creds_file = open(creds_file_path, "r") + creds = json.load(creds_file) + creds_file.close() + + # get secret values from creds.json file + db_host = _get_nested_value(creds, "db_host") + db_username = _get_nested_value(creds, "db_username") + db_password = _get_nested_value(creds, "db_password") + db_database = _get_nested_value(creds, "db_database") + hostname = _get_nested_value(creds, "hostname") + indexd_password = _get_nested_value(creds, "indexd_password") + google_client_secret = _get_nested_value(creds, "google_client_secret") + google_client_id = _get_nested_value(creds, "google_client_id") + hmac_key = _get_nested_value(creds, "hmac_key") + db_path = "postgresql://{}:{}@{}:5432/{}".format( + db_username, db_password, db_host, db_database + ) + + config_file = open(config_file_path, "r").read() + + print(" DB injected with value(s) from creds.json") + config_file = _replace(config_file, "DB", db_path) + + print(" BASE_URL injected with value(s) from creds.json") + config_file = _replace(config_file, "BASE_URL", "https://{}/user".format(hostname)) + + print(" INDEXD_PASSWORD injected with value(s) from creds.json") + config_file = _replace(config_file, "INDEXD_PASSWORD", indexd_password) + config_file = _replace(config_file, "INDEXD_USERNAME", "fence") + + print(" ENCRYPTION_KEY injected with value(s) from creds.json") + config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) + + print( + " OPENID_CONNECT/google/client_secret injected with value(s) " + "from creds.json" + ) + config_file = _replace( + config_file, "OPENID_CONNECT/google/client_secret", google_client_secret + ) + + print(" OPENID_CONNECT/google/client_id injected with value(s) from creds.json") + config_file = _replace( + config_file, "OPENID_CONNECT/google/client_id", google_client_id + ) + + open(config_file_path, "w+").write(config_file) + +def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): + creds_file = open(creds_file_path, "r") + creds = json.load(creds_file) + creds_file.close() + + # get secret values from creds.json file + db_host = _get_nested_value(creds, "db_host") + db_username = _get_nested_value(creds, "db_username") + db_password = _get_nested_value(creds, "db_password") + db_database = _get_nested_value(creds, "db_database") + hostname = _get_nested_value(creds, "hostname") + data_delivery_bucket = _get_nested_value(creds, "data_delivery_bucket") + data_delivery_bucket_aws_key_id = _get_nested_value(creds, "data_delivery_bucket_aws_key_id") + data_delivery_bucket_aws_access_key = _get_nested_value(creds, "data_delivery_bucket_aws_access_key") + csl_key = _get_nested_value(creds, "csl_key") + + db_path = "postgresql://{}:{}@{}:5432/{}".format( + db_username, db_password, db_host, db_database + ) + + config_file = open(config_file_path, "r").read() + + print(" DB injected with value(s) from creds.json") + config_file = _replace(config_file, "DB", db_path) + + 
print(" BASE_URL injected with value(s) from creds.json") + config_file = _replace(config_file, "BASE_URL", "https://{}/amanuensis".format(hostname)) + + print(" HOSTNAME injected with value(s) from creds.json") + config_file = _replace(config_file, "HOSTNAME", "{}".format(hostname)) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id", data_delivery_bucket_aws_key_id + ) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key", data_delivery_bucket_aws_access_key + ) + + print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name injected with value(s) from creds.json") + config_file = _replace( + config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name", data_delivery_bucket + ) + + print(" CSL_KEY injected with value(s) from creds.json") + config_file = _replace( + config_file, "CSL_KEY", csl_key + ) + + # modify USER_API to http://user-service/ if hostname is localhost + + if hostname == "localhost": + print(" USER_API set to http://fence-service/") + config_file = _replace(config_file, "USER_API", "http://fence-service/") + # print(" ENCRYPTION_KEY injected with value(s) from creds.json") + # config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) + + + open(config_file_path, "w+").write(config_file) + + +def set_prod_defaults(config_file_path): + config_file = open(config_file_path, "r").read() + + print( + " CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS set as " + "var/www/fence/fence_google_app_creds_secret.json" + ) + config_file = _replace( + config_file, + "CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS", + "/var/www/fence/fence_google_app_creds_secret.json", + ) + + print( + " CIRRUS_CFG/GOOGLE_STORAGE_CREDS set as " + "var/www/fence/fence_google_storage_creds_secret.json" + ) + config_file = _replace( + config_file, + "CIRRUS_CFG/GOOGLE_STORAGE_CREDS", + "/var/www/fence/fence_google_storage_creds_secret.json", + ) + + print(" INDEXD set as http://indexd-service/") + config_file = _replace(config_file, "INDEXD", "http://indexd-service/") + + print(" ARBORIST set as http://arborist-service/") + config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") + + print(" HTTP_PROXY/host set as cloud-proxy.internal.io") + config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") + + print(" HTTP_PROXY/port set as 3128") + config_file = _replace(config_file, "HTTP_PROXY/port", 3128) + + print(" DEBUG set to false") + config_file = _replace(config_file, "DEBUG", False) + + print(" MOCK_AUTH set to false") + config_file = _replace(config_file, "MOCK_AUTH", False) + + print(" MOCK_GOOGLE_AUTH set to false") + config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) + + print(" AUTHLIB_INSECURE_TRANSPORT set to true") + config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) + + print(" SESSION_COOKIE_SECURE set to true") + config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) + + print(" ENABLE_CSRF_PROTECTION set to true") + config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) + + open(config_file_path, "w+").write(config_file) + +def set_prod_defaults_amanuensis(config_file_path): + config_file = open(config_file_path, "r").read() + + print(" INDEXD set as 
http://indexd-service/") + config_file = _replace(config_file, "INDEXD", "http://indexd-service/") + + print(" ARBORIST set as http://arborist-service/") + config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") + + print(" HTTP_PROXY/host set as cloud-proxy.internal.io") + config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") + + print(" HTTP_PROXY/port set as 3128") + config_file = _replace(config_file, "HTTP_PROXY/port", 3128) + + print(" DEBUG set to false") + config_file = _replace(config_file, "DEBUG", False) + + print(" MOCK_AUTH set to false") + config_file = _replace(config_file, "MOCK_AUTH", False) + + print(" MOCK_GOOGLE_AUTH set to false") + config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) + + print(" AUTHLIB_INSECURE_TRANSPORT set to true") + config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) + + print(" SESSION_COOKIE_SECURE set to true") + config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) + + print(" ENABLE_CSRF_PROTECTION set to true") + config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) + + open(config_file_path, "w+").write(config_file) + +def inject_other_files_into_fence_config(other_files, config_file_path): + additional_cfgs = _get_all_additional_configs(other_files) + + config_file = open(config_file_path, "r").read() + + for key, value in additional_cfgs.iteritems(): + print(" {} set to {}".format(key, value)) + config_file = _nested_replace(config_file, key, value) + + open(config_file_path, "w+").write(config_file) + + +def _get_all_additional_configs(other_files): + """ + Attempt to parse given list of files and extract configuration variables and values + """ + additional_configs = dict() + for file_path in other_files: + try: + file_ext = file_path.strip().split(".")[-1] + if file_ext == "json": + json_file = open(file_path, "r") + configs = json.load(json_file) + json_file.close() + elif file_ext == "py": + configs = from_pyfile(file_path) + else: + print( + "Cannot load config vars from a file with extention: {}".format( + file_ext + ) + ) + except Exception as exc: + # if there's any issue reading the file, exit + print( + "Error reading {}. Cannot get configuration. Skipping this file. " + "Details: {}".format(other_files, str(exc)) + ) + continue + + if configs: + additional_configs.update(configs) + + return additional_configs + + +def _nested_replace(config_file, key, value, replacement_path=None): + replacement_path = replacement_path or key + try: + for inner_key, inner_value in value.iteritems(): + temp_path = replacement_path + temp_path = temp_path + "/" + inner_key + config_file = _nested_replace( + config_file, inner_key, inner_value, temp_path + ) + except AttributeError: + # not a dict so replace + if value is not None: + config_file = _replace(config_file, replacement_path, value) + + return config_file + + +def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0, key_only=False): + """ + Replace a nested value in a YAML file string with the given value without + losing comments. Uses a regex to do the replacement. + + Args: + yaml_config (str): a string representing a full configuration file + path_to_key (str): nested/path/to/key. The value of this key will be + replaced + replacement_value (str): Replacement value for the key from + path_to_key + """ + nested_path_to_replace = path_to_key.split("/") + + # our regex looks for a specific number of spaces to ensure correct + # level of nesting. 
It matches to the end of the line + search_string = ( + " " * nested_level + ".*" + nested_path_to_replace[0] + "(')?(\")?:.*\n" + ) + matches = re.search(search_string, yaml_config[start:]) + + # early return if we haven't found anything + if not matches: + return yaml_config + + # if we're on the last item in the path, we need to get the value and + # replace it in the original file + if len(nested_path_to_replace) == 1: + # replace the current key:value with the new replacement value + match_start = start + matches.start(0) + len(" " * nested_level) + match_end = start + matches.end(0) + if not key_only: + yaml_config = ( + yaml_config[:match_start] + + "{}: {}\n".format( + nested_path_to_replace[0], + _get_yaml_replacement_value(replacement_value, nested_level), + ) + + yaml_config[match_end:] + ) + else: + yaml_config = ( + yaml_config[:match_start] + + "{}:\n".format( + _get_yaml_replacement_value(replacement_value, nested_level), + ) + + yaml_config[match_end:] + ) + + return yaml_config + + # set new start point to past current match and move on to next match + start = start + matches.end(0) + nested_level += 1 + del nested_path_to_replace[0] + + return _replace( + yaml_config, + "/".join(nested_path_to_replace), + replacement_value, + start, + nested_level, + key_only=key_only, + ) + + +def from_pyfile(filename, silent=False): + """ + Modeled after flask's ability to load in python files: + https://github.com/pallets/flask/blob/master/flask/config.py + + Some alterations were made but logic is essentially the same + """ + filename = os.path.abspath(filename) + d = types.ModuleType("config") + d.__file__ = filename + try: + with open(filename, mode="rb") as config_file: + exec(compile(config_file.read(), filename, "exec"), d.__dict__) + except IOError as e: + print("Unable to load configuration file ({})".format(e.strerror)) + if silent: + return False + raise + return _from_object(d) + + +def _from_object(obj): + configs = {} + for key in dir(obj): + if key.isupper(): + configs[key] = getattr(obj, key) + return configs + + +def _get_yaml_replacement_value(value, nested_level=0): + if isinstance(value, str): + return "'" + value + "'" + elif isinstance(value, bool): + return str(value).lower() + elif isinstance(value, list) or isinstance(value, set): + output = "" + for item in value: + # spaces for nested level then spaces and hyphen for each list item + output += ( + "\n" + + " " * nested_level + + " - " + + _get_yaml_replacement_value(item) + + "" + ) + return output + else: + return value + + +def _get_nested_value(dictionary, nested_path): + """ + Return a value from a dictionary given a path-like nesting of keys. + + Will default to an empty string if value cannot be found. 
+ + Args: + dictionary (dict): a dictionary + nested_path (str): nested/path/to/key + + Returns: + ?: Value from dict + """ + replacement_value_path = nested_path.split("/") + replacement_value = copy.deepcopy(dictionary) + + for item in replacement_value_path: + replacement_value = replacement_value.get(item, {}) + + if replacement_value == {}: + replacement_value = "" + + return replacement_value + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", + "--creds_file_to_inject", + default="creds.json", + help="creds file to inject into the configuration yaml", + ) + parser.add_argument( + "--other_files_to_inject", + nargs="+", + help="fence_credentials.json, local_settings.py, fence_settings.py file(s) to " + "inject into the configuration yaml", + ) + parser.add_argument( + "-c", "--config_file", default="config.yaml", help="configuration yaml" + ) + args = parser.parse_args() + + if args.config_file == "new-amanuensis-config.yaml": + inject_creds_into_amanuensis_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults_amanuensis(args.config_file) + else: + inject_creds_into_fence_config(args.creds_file_to_inject, args.config_file) + set_prod_defaults(args.config_file) + + if args.other_files_to_inject: + inject_other_files_into_fence_config( + args.other_files_to_inject, args.config_file + ) diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py index 6f6ea4f86..5518e99dc 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -74,7 +74,9 @@ # trailing slash intentionally omitted config['GUPPY_API'] = 'http://guppy-service' -config["USER_API"] = config["OIDC_ISSUER"] # for use by authutils +config["USER_API"] = 'http://fence-service/' # for use by authutils +config["FENCE"] = 'http://fence-service' + # config['USER_API'] = 'http://fence-service/' # option to force authutils to prioritize USER_API setting over the issuer from # token when redirecting, used during local docker compose setup when the diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index e18375a83..91d2300e8 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -36,12 +36,12 @@ spec: items: - key: jwt_private_key.pem path: jwt_private_key.pem - # - name: config-helper - # secret: - # secretName: "pcdcanalysistools-secret" - # - name: creds-volume - # secret: - # secretName: "sheepdog-creds" + - name: config-helper + secret: + secretName: "pcdcanalysistools-secret" + - name: creds-volume + secret: + secretName: "sheepdog-creds" {{- with .Values.imagePullSecrets }} @@ -146,18 +146,22 @@ spec: readOnly: true mountPath: "/var/www/PcdcAnalysisTools/wsgi.py" subPath: "settings.py" + - name: "config-volume" + readOnly: true + mountPath: "/PcdcAnalysisTools/bin/settings.py" + subPath: "settings.py" - name: "pcdcanalysistools-jwt-keys" readOnly: true mountPath: "/var/www/PcdcAnalysisTools/jwt_private_key.pem" subPath: "jwt_private_key.pem" - # - name: "creds-volume" - # readOnly: true - # mountPath: "/var/www/PcdcAnalysisTools/creds.json" - # subPath: creds.json - # - name: "config-helper" - # readOnly: true - # mountPath: "/var/www/PcdcAnalysisTools/config_helper.py" - # subPath: confighelper.py + - name: "creds-volume" + readOnly: true + mountPath: 
"/var/www/PcdcAnalysisTools/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/PcdcAnalysisTools/bin/config_helper.py" + subPath: confighelper.py ports: - name: http containerPort: 80 diff --git a/pelican.yaml b/pelican.yaml new file mode 100644 index 000000000..c226e3ce3 --- /dev/null +++ b/pelican.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pelican-pod +spec: + volumes: + - name: pelican-creds-volume + secret: + secretName: pelicanservice-g3auto + - name: peregrine-creds-volume + secret: + secretName: peregrine-creds + containers: + - name: pelican-container + image: pelican:test + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "cd / && tail -f /dev/null"] + ports: + - containerPort: 80 + env: + - name: DICTIONARY_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: dictionary_url + - name: GEN3_HOSTNAME + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + - name: ROOT_NODE + value: subject + - name: OUTPUT_FILE_FORMAT + value: ZIP + - name: INPUT_DATA + value: "{}" + - name: ACCESS_FORMAT + value: presigned_url + - name: INPUT_VARIABLES_FOR_FILE_CONVERSION + value: '{"config": "config.py","analysis": "INRG","is_black_list": false}' + - name: ACCESS_TOKEN + value: "" + volumeMounts: + - name: pelican-creds-volume + readOnly: true + mountPath: "/pelican-creds.json" + subPath: config.json + - name: peregrine-creds-volume + readOnly: true + mountPath: "/peregrine-creds.json" + subPath: creds.json \ No newline at end of file From 1cfc7ac4e0d875fffec90d8949c5735cf129f52f Mon Sep 17 00:00:00 2001 From: pkellyc Date: Sun, 29 Jun 2025 20:19:02 -0500 Subject: [PATCH 042/126] Connect external resources to subjects for testing --- .../external/update_external_references.py | 86 +++++++++++++++++++ pcdc_data/generate_data.sh | 14 +++ pcdc_data/run_all.sh | 4 +- 3 files changed, 102 insertions(+), 2 deletions(-) create mode 100644 pcdc_data/external/update_external_references.py diff --git a/pcdc_data/external/update_external_references.py b/pcdc_data/external/update_external_references.py new file mode 100644 index 000000000..b34c27b84 --- /dev/null +++ b/pcdc_data/external/update_external_references.py @@ -0,0 +1,86 @@ +import json +import sys +from pathlib import Path + + +def ensure_subjects_list(ref): + """ + This function ensures we are looking at a list. + """ + subjects = ref.get("subjects") + + # If it's a dict (one subject), wrap it in a list + if isinstance(subjects, dict): + return [subjects] + + # If it's already a list, just return it + elif isinstance(subjects, list): + return subjects + + # If it's missing or an unexpected format, return a list with a blank dict + else: + return [{"submitter_id": ""}] + + +def update_external_refs(subject_path, external_ref_path, output_path=None): + """ + This function reads a list of subjects and external references, + then updates each external reference's subjects[0]['submitter_id'] + with the matching subject's submitter_id (by index). 
+ """ + + # Load the subject file (expects a list of subject objects) + with open(subject_path) as f: + subjects = json.load(f) + + # Extract just the top-level 'submitter_id' from each subject + subject_ids = [s["submitter_id"] for s in subjects] + + # Load the external_reference file (expects a list of reference objects) + with open(external_ref_path) as f: + external_refs = json.load(f) + + # Loop over each external reference + for i, ref in enumerate(external_refs): + # If there are more external references than subjects, warn and skip + if i >= len(subject_ids): + print(f"Warning: Not enough subject IDs for external reference {i}") + continue + + # Get the corresponding subject submitter_id + subject_id = subject_ids[i] + + # Make sure the 'subjects' field is a list with one dict + ref["subjects"] = ensure_subjects_list(ref) + + # Update the first subject's submitter_id with the new one + ref["subjects"][0]["submitter_id"] = subject_id + + # Use the provided output path, or overwrite the original file + output_path = output_path or external_ref_path + + # Save the updated external references back to a JSON file + with open(output_path, "w") as f: + json.dump(external_refs, f, indent=4) + + print(f"Updated file written to {output_path}") + + +# This block only runs if the script is called directly, not if imported +if __name__ == "__main__": + # Expect at least 2 arguments: subject.json and external_reference.json + if len(sys.argv) < 3: + print( + "Usage: python update_external_references.py [output.json]" + ) + sys.exit(1) # Exit with error if not enough arguments are provided + + # Read the input file paths from the command-line arguments + subject_file = Path(sys.argv[1]) + external_ref_file = Path(sys.argv[2]) + + # Optional third argument: a custom output file path + output_file = Path(sys.argv[3]) if len(sys.argv) > 3 else None + + # Call the main function + update_external_refs(subject_file, external_ref_file, output_file) diff --git a/pcdc_data/generate_data.sh b/pcdc_data/generate_data.sh index 2d21aeba7..85c2f1494 100755 --- a/pcdc_data/generate_data.sh +++ b/pcdc_data/generate_data.sh @@ -20,4 +20,18 @@ echo "data-simulator branch changed to pyyaml-patch change when PR is completed" cd ./gen3_etl/graph mkdir ./fake_data ./generate.sh +# Copy a version of subject.json that we will use in the script +cp ./pcdc_data/gen3_etl/graph/fake_data/data-simulator/subject.json ./pcdc_data/external/ + +# run our update script +SUBJECT_JSON="./pcdc_data/external/subject.json" +EXTERNAL_JSON="./pcdc_data/external/external_reference.json" +UPDATE_SCRIPT="./pcdc_data/external/update_external_references.py" + +# Grab a version of the external_reference, as well as the subject, and connect them together via submitter_id +python3 "$UPDATE_SCRIPT" "$SUBJECT_JSON" "$EXTERNAL_JSON" + +# copy our external_refernce.json to where it belongs +cp ./pcdc_data/external/external_reference.json ./pcdc_data/gen3_etl/graph/fake_data/data-simulator/ + cd ../../ \ No newline at end of file diff --git a/pcdc_data/run_all.sh b/pcdc_data/run_all.sh index c9ebeb233..37fe47039 100755 --- a/pcdc_data/run_all.sh +++ b/pcdc_data/run_all.sh @@ -28,9 +28,9 @@ fi chmod +x "$(dirname "$0")"/*.sh ./load_gen3_etl.sh - +# Update the build json file ./generate_data.sh - +# Update the external ref file ./load_graph_db.sh ./load_elasticsearch.sh \ No newline at end of file From 6d8ccc974a2ffcc78bd9b05472de23f09b2824af Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 30 Jun 2025 11:19:49 -0500 Subject: [PATCH 043/126] adding 
external_reference.json with small dataset for testing --- pcdc_data/external/external_reference.json | 68 ++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 pcdc_data/external/external_reference.json diff --git a/pcdc_data/external/external_reference.json b/pcdc_data/external/external_reference.json new file mode 100644 index 000000000..fd63b3029 --- /dev/null +++ b/pcdc_data/external/external_reference.json @@ -0,0 +1,68 @@ +[ + [ + { + "external_links": "TARGET - GDC|https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg|https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg", + "external_resource_id": "1", + "external_resource_name": "TARGET - GDC", + "external_subject_id": "4e824cfb-d887-57b3-bff2-95e2e7b4d410", + "external_subject_submitter_id": "TARGET-30-PADLJN", + "external_subject_url": "https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", + "subjects": [ + { + "submitter_id": "COG_PADLJN" + } + ], + "submitter_id": "external_reference_gdc_COG_PADLJN", + "type": "external_reference" + }, + { + "external_links": "TARGET - GDC|https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg|https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg", + "external_resource_id": "1", + "external_resource_name": "TARGET - GDC", + "external_subject_id": "448a7c70-73e8-5b2f-b226-83e4065dc6ef", + "external_subject_submitter_id": "TARGET-30-PAKPAL", + "external_subject_url": "https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", + "subjects": [ + { + "submitter_id": "COG_PAKPAL" + } + ], + "submitter_id": "external_reference_gdc_COG_PAKPAL", + "type": "external_reference" + }, + { + "external_links": "GMKF|https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png|https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", + "external_resource_id": "2", + "external_resource_name": "GMKF", + "external_subject_id": null, + "external_subject_submitter_id": "PT_72AZK0JR", + "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", + "subjects": [ + { + "submitter_id": "COG_PAKVUY" + } + ], + "submitter_id": "external_reference_gmkf_COG_PAKVUY", + "type": "external_reference" + }, + { + "external_links": "GMKF|https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png|https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", + "external_resource_id": "2", + "external_resource_name": "GMKF", + "external_subject_id": null, + "external_subject_submitter_id": "PT_N5N59J9M", + "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", + "subjects": [ + { + "submitter_id": "COG_PAKXDZ" + } + ], + "submitter_id": "external_reference_gmkf_COG_PAKXDZ", + "type": "external_reference" + } + ] +] \ No newline at end of file 
From 034fb72082042dcfdbace1af12cc3f83a4c13198 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 30 Jun 2025 11:23:37 -0500 Subject: [PATCH 044/126] Remove unnecessary cp of subject.json and use the file that is generated --- pcdc_data/generate_data.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pcdc_data/generate_data.sh b/pcdc_data/generate_data.sh index 85c2f1494..7caec4cdc 100755 --- a/pcdc_data/generate_data.sh +++ b/pcdc_data/generate_data.sh @@ -20,11 +20,9 @@ echo "data-simulator branch changed to pyyaml-patch change when PR is completed" cd ./gen3_etl/graph mkdir ./fake_data ./generate.sh -# Copy a version of subject.json that we will use in the script -cp ./pcdc_data/gen3_etl/graph/fake_data/data-simulator/subject.json ./pcdc_data/external/ # run our update script -SUBJECT_JSON="./pcdc_data/external/subject.json" +SUBJECT_JSON="./pcdc_data/gen3_etl/graph/fake_data/data-simulator/subject.json" EXTERNAL_JSON="./pcdc_data/external/external_reference.json" UPDATE_SCRIPT="./pcdc_data/external/update_external_references.py" From 678f89cbcadb139bce593e2bf18c33d3e73986a8 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 7 Jul 2025 13:57:11 -0500 Subject: [PATCH 045/126] correct rel paths that were invalid --- pcdc_data/generate_data.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pcdc_data/generate_data.sh b/pcdc_data/generate_data.sh index 7caec4cdc..83ce4c970 100755 --- a/pcdc_data/generate_data.sh +++ b/pcdc_data/generate_data.sh @@ -22,14 +22,14 @@ mkdir ./fake_data ./generate.sh # run our update script -SUBJECT_JSON="./pcdc_data/gen3_etl/graph/fake_data/data-simulator/subject.json" -EXTERNAL_JSON="./pcdc_data/external/external_reference.json" -UPDATE_SCRIPT="./pcdc_data/external/update_external_references.py" +SUBJECT_JSON="./fake_data/data-simulator/subject.json" +EXTERNAL_JSON="../../external/external_reference.json" +UPDATE_SCRIPT="../../external/update_external_references.py" # Grab a version of the external_reference, as well as the subject, and connect them together via submitter_id python3 "$UPDATE_SCRIPT" "$SUBJECT_JSON" "$EXTERNAL_JSON" # copy our external_refernce.json to where it belongs -cp ./pcdc_data/external/external_reference.json ./pcdc_data/gen3_etl/graph/fake_data/data-simulator/ +cp ../../external/external_reference.json ./fake_data/data-simulator/ cd ../../ \ No newline at end of file From 4277862767c5ea5408baed0771eaa7c8a3baba38 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 7 Jul 2025 14:02:39 -0500 Subject: [PATCH 046/126] flatten the json --- pcdc_data/external/update_external_references.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pcdc_data/external/update_external_references.py b/pcdc_data/external/update_external_references.py index b34c27b84..61bbcd7d7 100644 --- a/pcdc_data/external/update_external_references.py +++ b/pcdc_data/external/update_external_references.py @@ -40,6 +40,10 @@ def update_external_refs(subject_path, external_ref_path, output_path=None): with open(external_ref_path) as f: external_refs = json.load(f) + # Flatten outer list if needed + if isinstance(external_refs, list) and isinstance(external_refs[0], list): + external_refs = [item for sublist in external_refs for item in sublist] + # Loop over each external reference for i, ref in enumerate(external_refs): # If there are more external references than subjects, warn and skip From b780831d962881b098df3fc79b44871e1bc4e9ef Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 7 Jul 2025 14:03:39 -0500 Subject: [PATCH 
047/126] one last update for external_reference, before adding it to the .gitignore, because the file will change every build. --- pcdc_data/external/external_reference.json | 130 ++++++++++----------- 1 file changed, 64 insertions(+), 66 deletions(-) diff --git a/pcdc_data/external/external_reference.json b/pcdc_data/external/external_reference.json index fd63b3029..e02cf2ca5 100644 --- a/pcdc_data/external/external_reference.json +++ b/pcdc_data/external/external_reference.json @@ -1,68 +1,66 @@ [ - [ - { - "external_links": "TARGET - GDC|https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg|https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", - "external_resource_icon_path": "https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg", - "external_resource_id": "1", - "external_resource_name": "TARGET - GDC", - "external_subject_id": "4e824cfb-d887-57b3-bff2-95e2e7b4d410", - "external_subject_submitter_id": "TARGET-30-PADLJN", - "external_subject_url": "https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", - "subjects": [ - { - "submitter_id": "COG_PADLJN" - } - ], - "submitter_id": "external_reference_gdc_COG_PADLJN", - "type": "external_reference" - }, - { - "external_links": "TARGET - GDC|https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg|https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", - "external_resource_icon_path": "https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg", - "external_resource_id": "1", - "external_resource_name": "TARGET - GDC", - "external_subject_id": "448a7c70-73e8-5b2f-b226-83e4065dc6ef", - "external_subject_submitter_id": "TARGET-30-PAKPAL", - "external_subject_url": "https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", - "subjects": [ - { - "submitter_id": "COG_PAKPAL" - } - ], - "submitter_id": "external_reference_gdc_COG_PAKPAL", - "type": "external_reference" - }, - { - "external_links": "GMKF|https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png|https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", - "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", - "external_resource_id": "2", - "external_resource_name": "GMKF", - "external_subject_id": null, - "external_subject_submitter_id": "PT_72AZK0JR", - "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", - "subjects": [ - { - "submitter_id": "COG_PAKVUY" - } - ], - "submitter_id": "external_reference_gmkf_COG_PAKVUY", - "type": "external_reference" - }, - { - "external_links": "GMKF|https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png|https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", - "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", - "external_resource_id": "2", - "external_resource_name": "GMKF", - "external_subject_id": null, - "external_subject_submitter_id": "PT_N5N59J9M", - "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", - "subjects": [ - { - "submitter_id": "COG_PAKXDZ" - } - ], - "submitter_id": "external_reference_gmkf_COG_PAKXDZ", - "type": 
"external_reference" - } - ] + { + "external_links": "TARGET - GDC|https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg|https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg", + "external_resource_id": "1", + "external_resource_name": "TARGET - GDC", + "external_subject_id": "4e824cfb-d887-57b3-bff2-95e2e7b4d410", + "external_subject_submitter_id": "TARGET-30-PADLJN", + "external_subject_url": "https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", + "subjects": [ + { + "submitter_id": "subject_republication_thirdly" + } + ], + "submitter_id": "external_reference_gdc_COG_PADLJN", + "type": "external_reference" + }, + { + "external_links": "TARGET - GDC|https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg|https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.amazonaws.com/NHI_GDC_DataPortal-logo.23e6ca47.svg", + "external_resource_id": "1", + "external_resource_name": "TARGET - GDC", + "external_subject_id": "448a7c70-73e8-5b2f-b226-83e4065dc6ef", + "external_subject_submitter_id": "TARGET-30-PAKPAL", + "external_subject_url": "https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", + "subjects": [ + { + "submitter_id": "subject_selenomancy_dacryops" + } + ], + "submitter_id": "external_reference_gdc_COG_PAKPAL", + "type": "external_reference" + }, + { + "external_links": "GMKF|https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png|https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", + "external_resource_id": "2", + "external_resource_name": "GMKF", + "external_subject_id": null, + "external_subject_submitter_id": "PT_72AZK0JR", + "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", + "subjects": [ + { + "submitter_id": "subject_nakedly_pleasure" + } + ], + "submitter_id": "external_reference_gmkf_COG_PAKVUY", + "type": "external_reference" + }, + { + "external_links": "GMKF|https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png|https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", + "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", + "external_resource_id": "2", + "external_resource_name": "GMKF", + "external_subject_id": null, + "external_subject_submitter_id": "PT_N5N59J9M", + "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", + "subjects": [ + { + "submitter_id": "subject_plunderable_firewood" + } + ], + "submitter_id": "external_reference_gmkf_COG_PAKXDZ", + "type": "external_reference" + } ] \ No newline at end of file From a50120fbc4518cde447b2513b9d5f341bf7f1b0c Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 7 Jul 2025 14:50:43 -0500 Subject: [PATCH 048/126] cleaned up .gitignore and added some descriptions --- .gitignore | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index af3cc1070..4b362378a 100644 --- 
a/.gitignore +++ b/.gitignore @@ -1,14 +1,18 @@ -postgres.txt +# --- Folders --- **/charts/ notes/ -Chart.lock -.DS_Store -_sample-*/ -secret-values.yaml -.env -credentials.json CA/ -temp.yaml -/values.yaml gen3_scripts/ gen3_etl/ +_sample-*/ + +# --- Files --- +Chart.lock +.DS_Store # macOS system file (usually ignored) +secret-values.yaml # Helm secrets/values file +.env # Environment variables +credentials.json # Instance generated service account or credentials +temp.yaml +/values.yaml # Main Helm values file +postgres.txt +pcdc_data/external/external_reference.json # External reference data file for PCDC From 0bbeccb6a81cc3fb8a76db33aed69f44f6e3cf28 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 10 Jul 2025 16:46:53 -0700 Subject: [PATCH 049/126] remove helm hooks --- .../templates/amanuensis-secret.yaml | 3 - .../templates/amanuensis-secrets.yaml | 58 ++++++------ .../amanuensis-validate-filter-sets-job.yaml | 92 +++++++++++++++++++ helm/common/templates/_db_setup_job.tpl | 3 - helm/gearbox/templates/deployment.yaml | 4 +- helm/pcdcanalysistools/Chart.yaml | 7 +- .../templates/deployment.yaml | 4 + 7 files changed, 132 insertions(+), 39 deletions(-) create mode 100644 helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml diff --git a/helm/amanuensis/templates/amanuensis-secret.yaml b/helm/amanuensis/templates/amanuensis-secret.yaml index a4b0cd830..c7ae2e52b 100644 --- a/helm/amanuensis/templates/amanuensis-secret.yaml +++ b/helm/amanuensis/templates/amanuensis-secret.yaml @@ -2,9 +2,6 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-secret - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-1" type: Opaque data: {{ (.Files.Glob "amanuensis-secret/*").AsSecrets | indent 2 }} diff --git a/helm/amanuensis/templates/amanuensis-secrets.yaml b/helm/amanuensis/templates/amanuensis-secrets.yaml index fe879774b..dd1eb4719 100644 --- a/helm/amanuensis/templates/amanuensis-secrets.yaml +++ b/helm/amanuensis/templates/amanuensis-secrets.yaml @@ -2,17 +2,11 @@ apiVersion: v1 kind: ServiceAccount metadata: name: amanuensis-jobs - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-2" --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: amanuensis-jobs-role - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-2" rules: - apiGroups: [""] resources: ["secrets"] @@ -22,9 +16,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: amanuensis-jobs-role-binding - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-2" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -38,33 +29,41 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-config - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-1" - "helm.sh/hook-delete-policy": before-hook-creation data: - amanuensis-config.yaml: "" + {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-config")) }} + {{- if and $existingSecret $existingSecret.data (hasKey $existingSecret.data "amanuensis-config.yaml") }} + amanuensis-config.yaml: {{ index $existingSecret.data "amanuensis-config.yaml" | quote }} + {{- else }} + {} + {{- end }} --- apiVersion: v1 kind: Secret metadata: name: amanuensis-creds - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-1" data: - creds.json: "" + {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf 
"amanuensis-creds")) }} + {{- if and $existingSecret $existingSecret.data (hasKey $existingSecret.data "creds.json") }} + creds.json: {{ index $existingSecret.data "creds.json" | quote }} + {{- else }} + {} + {{- end }} --- +{{- define "create-amanuensis-config-job" -}} +{{- $existingSecretConfig := lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-config") }} +{{- $existingSecretCreds := lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-creds") }} +{{- $shouldRunJob := true }} +{{- if and + (and $existingSecretConfig $existingSecretConfig.data (hasKey $existingSecretConfig.data "amanuensis-config.yaml")) + (and $existingSecretCreds $existingSecretCreds.data (hasKey $existingSecretCreds.data "creds.json")) +}} + {{- $shouldRunJob = false }} +{{- end }} +{{- if $shouldRunJob }} apiVersion: batch/v1 kind: Job metadata: name: amanuensis-secrets-{{ .Release.Revision }} - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "0" - "helm.sh/hook-delete-policy": before-hook-creation - labels: - redeploy-hash: "{{ .Release.Revision }}" spec: backoffLimit: 0 template: @@ -146,8 +145,8 @@ spec: } EOF - kubectl patch secret amanuensis-creds --type='json' -p='[{"op": "replace", "path": "/data/creds.json", "value": "'$(cat /mnt/shared/creds.json | base64 -w 0)'"}]' - + kubectl patch secret amanuensis-creds --type='json' -p='[{"op": "add", "path": "/data", "value": {"creds.json": "'$(cat /mnt/shared/creds.json | base64 -w 0)'"}}]' + - name: create-amanuensis-config image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: "{{ .Values.image.pullPolicy }}" @@ -193,7 +192,12 @@ spec: if [[ -f /mnt/shared/new-amanuensis-config.yaml ]]; then # load yaml file into secrets echo "saving amanuensis configuration into amanuensis-config secret..." - kubectl patch secret amanuensis-config --type='json' -p='[{"op": "replace", "path": "/data/amanuensis-config.yaml", "value": "'$(cat /mnt/shared/new-amanuensis-config.yaml | base64 -w 0)'"}]' + kubectl patch secret amanuensis-config --type='json' -p='[{"op": "add", "path": "/data", "value": {"amanuensis-config.yaml": "'$(cat /mnt/shared/new-amanuensis-config.yaml | base64 -w 0)'"}}]' fi restartPolicy: Never +{{- end }} +{{- end }} +--- +# Include the job template here +{{- include "create-amanuensis-config-job" . 
}} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml new file mode 100644 index 000000000..219de9f32 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml @@ -0,0 +1,92 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: amanuensis-validate-filter-sets + labels: + redeploy-hash: "{{ .Release.Revision }}" +spec: + # Job spec starts here directly (no schedule or jobTemplate needed) + template: + metadata: + labels: + app: gen3job + spec: + automountServiceAccountToken: false + volumes: + - name: config-volume + secret: + secretName: "amanuensis-config" + - name: es-dd-config-volume + emptyDir: {} + - name: portal-config + secret: + secretName: "portal-config" + - name: invalid-filter-set-volume + emptyDir: {} + initContainers: + - name: amanuensis-db-filter-set-filler + image: "quay.io/pcdc/amanuensis-db-filter-set-filler:0.1.0" + imagePullPolicy: IfNotPresent + env: + - name: invalid_filters_list_path + value: "/var/www/amanuensis/invalid-filters/invalid-filters.json" + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "invalid-filter-set-volume" + mountPath: "/var/www/amanuensis/invalid-filters/" + - name: create-es-dd-config + image: "quay.io/cdis/awshelper:stable" + imagePullPolicy: IfNotPresent + env: + - name: BASE_URL + value: "https://portal.pedscommons.org/" + - name: OUTPUT_FILE + value: "/tmp/es-dd-config/es_to_dd_map.json" + - name: DICTIONARY_URL + value: "https://portal.pedscommons.org/api/v0/submission/_dictionary/_all" + volumeMounts: + - name: "es-dd-config-volume" + mountPath: "/tmp/es-dd-config" + args: + - /bin/bash + - -c + - | + cd /tmp + export PATH="/home/ubuntu/.local/bin:$PATH" + git clone https://github.com/chicagopcdc/gen3_etl.git + echo "Repository cloned successfully." + cd gen3_etl/elasticsearch + pip install -r requirements-ES-DD.txt + cd etl + python create_es_dd_mapping.py add-manual-fields + echo "ES-DD config created successfully." 
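# Aside (not part of this chart): the amanuensis secret templates above replace the
# removed helm hooks with Helm's `lookup`, so data written into the secrets by the
# post-install job is preserved on later upgrades instead of being reset to "".
# A minimal sketch of that pattern, using an illustrative secret name and key:
#
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: example-config            # hypothetical name
#   data:
#     {{- $existing := lookup "v1" "Secret" .Release.Namespace "example-config" }}
#     {{- if and $existing $existing.data (hasKey $existing.data "config.yaml") }}
#     config.yaml: {{ index $existing.data "config.yaml" | quote }}   # reuse what the job wrote
#     {{- else }}
#     {}                              # first install: left empty for the job to populate
#     {{- end }}
#
# `lookup` returns nothing under `helm template` / `--dry-run`, so the else branch also
# covers offline rendering.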
+ containers: + - name: validate-filter-sets + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + volumeMounts: + - name: "es-dd-config-volume" + mountPath: "/var/www/amanuensis/es_to_dd_map.json" + subPath: es_to_dd_map.json + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config.yaml" + subPath: amanuensis-config.yaml + - name: "portal-config" + readOnly: true + mountPath: "/var/www/amanuensis/gitops.json" + subPath: gitops.json + - name: "invalid-filter-set-volume" + mountPath: "/var/www/amanuensis/invalid-filters.json" + subPath: invalid-filters.json + args: + - /bin/bash + - -c + - | + validate-filter-sets + restartPolicy: Never + # Optional: add a backoff limit to control retries + backoffLimit: 3 \ No newline at end of file diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 51d27963b..2b57d1816 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -175,9 +175,6 @@ apiVersion: v1 kind: Secret metadata: name: {{ $.Chart.Name }}-dbcreds - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-weight": "-5" data: {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-dbcreds" .Chart.Name)) }} {{- if $existingSecret }} diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml index b342a9c66..0f069da84 100644 --- a/helm/gearbox/templates/deployment.yaml +++ b/helm/gearbox/templates/deployment.yaml @@ -79,8 +79,8 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head - + poetry run alembic upgrade head + {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 0eb76c931..4d464aecb 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -23,8 +23,7 @@ version: 0.1.0 # It is recommended to use it with quotes. appVersion: "1.16.0" - dependencies: -- name: common - version: 0.1.11 - repository: file://../common \ No newline at end of file + - name: common + version: 0.1.16 + repository: file://../common diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index e18375a83..6518d7807 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -141,6 +141,10 @@ spec: value: /etc/ssl/certs/ca-certificates.crt - name: GEN3_DEBUG value: "False" + - name: FLASK_ENV + value: development + - name: FLASK_APP + value: PcdcAnalysisTools.wsgi volumeMounts: - name: "config-volume" readOnly: true From ed60ab0fdbc186393c0855b46c19ac7fe8715d53 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Thu, 10 Jul 2025 20:42:29 -0500 Subject: [PATCH 050/126] The ETL job now includes external_references in the subject mapping when building ES docs. --- helm/etl/values.yaml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/helm/etl/values.yaml b/helm/etl/values.yaml index 032209d05..6372fb678 100644 --- a/helm/etl/values.yaml +++ b/helm/etl/values.yaml @@ -16,14 +16,12 @@ image: # -- (string) Overrides the image tag whose default is the chart appVersion. tag: "2024.11" - # -- (list) Docker image pull secrets. 
imagePullSecrets: [] # -- (map) Annotations to add to the pod podAnnotations: {} - # -- (map) Resource requests and limits for the containers in the pod resources: tube: @@ -41,7 +39,6 @@ resources: # -- (string) The amount of memory requested memory: 128Mi - esEndpoint: gen3-elasticsearch-master etlMapping: @@ -134,6 +131,14 @@ etlMapping: target_nodes: - name: slide_image path: slides.samples.cases + - name: subject + doc_type: subject + type: aggregator + root: subject + props: + - name: submitter_id + - name: project_id + - name: external_references # -- (map) Configuration options for es garbage cronjob. esGarbageCollect: From 83af06ae1ee57078f281b4d3f2b86f945f9fa5f9 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Thu, 10 Jul 2025 20:45:55 -0500 Subject: [PATCH 051/126] aggregate external_references.external_resource_name, and prepare histogram data for it,but do not show a chart. --- helm/portal/defaults/gitops.json | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index 73fa42f9c..cc333cd9f 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -127,13 +127,31 @@ "consortium": { "chartType": "bar", "title": "Consortium" + }, + "external_references.external_resource_name": { + "chartType": "bar", + "title": "External Resource Name", + "show": false } }, "filters": { "anchor": { "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] + "options": [ + "Initial Diagnosis", + "Relapse" + ], + "tabs": [ + "Disease", + "Molecular", + "Surgery", + "Radiation", + "Response", + "SMN", + "Imaging", + "Labs", + "SCT" + ] }, "tabs": [ { @@ -430,7 +448,7 @@ { "field": "tumor_assessments.tumor_laterality", "name": "Tumor Laterality" - }, + }, { "field": "stagings.irs_group", "name": "IRS Group" From e46405505b9ed955740119f8113933c34073f5cc Mon Sep 17 00:00:00 2001 From: pkellyc Date: Thu, 10 Jul 2025 20:50:11 -0500 Subject: [PATCH 052/126] Correct the external ref file --- pcdc_data/external/external_reference.json | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/pcdc_data/external/external_reference.json b/pcdc_data/external/external_reference.json index e02cf2ca5..e3a0ac116 100644 --- a/pcdc_data/external/external_reference.json +++ b/pcdc_data/external/external_reference.json @@ -5,14 +5,14 @@ "external_resource_id": "1", "external_resource_name": "TARGET - GDC", "external_subject_id": "4e824cfb-d887-57b3-bff2-95e2e7b4d410", - "external_subject_submitter_id": "TARGET-30-PADLJN", + "external_subject_submitter_id": "subject_regraduate_Tremandra", "external_subject_url": "https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", "subjects": [ { - "submitter_id": "subject_republication_thirdly" + "submitter_id": "subject_myogram_Yakala" } ], - "submitter_id": "external_reference_gdc_COG_PADLJN", + "submitter_id": "external_reference_isomaltose_unmingling", "type": "external_reference" }, { @@ -21,14 +21,14 @@ "external_resource_id": "1", "external_resource_name": "TARGET - GDC", "external_subject_id": "448a7c70-73e8-5b2f-b226-83e4065dc6ef", - "external_subject_submitter_id": "TARGET-30-PAKPAL", + "external_subject_submitter_id": "subject_amenable_thermotropism", "external_subject_url": "https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", "subjects": [ { - 
"submitter_id": "subject_selenomancy_dacryops" + "submitter_id": "subject_euglobulin_unlacquered" } ], - "submitter_id": "external_reference_gdc_COG_PAKPAL", + "submitter_id": "external_reference_homemaking_antivibrator", "type": "external_reference" }, { @@ -36,15 +36,15 @@ "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", "external_resource_id": "2", "external_resource_name": "GMKF", - "external_subject_id": null, - "external_subject_submitter_id": "PT_72AZK0JR", + "external_subject_id": "5554823-73e8-5b2f-b226-83e4065dc6ef", + "external_subject_submitter_id": "subject_Archidiskodon_somnambulance", "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", "subjects": [ { - "submitter_id": "subject_nakedly_pleasure" + "submitter_id": "subject_pubiotomy_nonaction" } ], - "submitter_id": "external_reference_gmkf_COG_PAKVUY", + "submitter_id": "external_reference_irreverendly_subtrifid", "type": "external_reference" }, { @@ -52,15 +52,15 @@ "external_resource_icon_path": "https://pcdc-external-resource-files.s3.us-east-1.amazonaws.com/Kids_First_Graphic_Horizontal_OL_FINAL.DRC-01-scaled.png", "external_resource_id": "2", "external_resource_name": "GMKF", - "external_subject_id": null, - "external_subject_submitter_id": "PT_N5N59J9M", + "external_subject_id": "5554823-73e8-5b2f-777b-83e4065dc6ef", + "external_subject_submitter_id": "subject_coincidence_strom", "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", "subjects": [ { - "submitter_id": "subject_plunderable_firewood" + "submitter_id": "subject_forerehearsed_subelongate" } ], - "submitter_id": "external_reference_gmkf_COG_PAKXDZ", + "submitter_id": "external_reference_communicative_syntactics", "type": "external_reference" } ] \ No newline at end of file From 7fcea388c811ec355a7419cbfdc8ac553cf5f176 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 <80228075+paulmurdoch19@users.noreply.github.com> Date: Fri, 11 Jul 2025 14:54:09 -0700 Subject: [PATCH 053/126] Create sync-cdis-with-pcdc.yml --- .github/workflows/sync-cdis-with-pcdc.yml | 43 +++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 .github/workflows/sync-cdis-with-pcdc.yml diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml new file mode 100644 index 000000000..863ab2743 --- /dev/null +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -0,0 +1,43 @@ +name: Sync upstream master to pcdc_dev + +on: + schedule: + - cron: '0 0 * * *' # every day at midnight UTC + workflow_dispatch: + +jobs: + sync-upstream: + runs-on: ubuntu-latest + + steps: + - name: Checkout your repo + uses: actions/checkout@v4 + with: + ref: pcdc_dev + fetch-depth: 0 + + - name: Add upstream remote and fetch + run: | + git remote add upstream https://github.com/uc-cdis/gen3-helm.git + git fetch upstream master + + - name: Create sync branch + run: | + git checkout -B sync-upstream-master + git merge upstream/master --no-edit + + - name: Push sync branch + uses: ad-m/github-push-action@v0.8.0 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: sync-upstream-master + + - name: Create or update PR + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + source_branch: sync-upstream-master + target_branch: pcdc_dev + title: "Sync upstream master to pcdc_dev" + body: "Automatic PR to sync latest changes from upstream master." 
+ draft: false From 48a535b6c4a5e05950b02b43dc223ae2e912fb72 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 <80228075+paulmurdoch19@users.noreply.github.com> Date: Fri, 11 Jul 2025 15:07:42 -0700 Subject: [PATCH 054/126] Update sync-cdis-with-pcdc.yml --- .github/workflows/sync-cdis-with-pcdc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 863ab2743..e608e6603 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -2,7 +2,7 @@ name: Sync upstream master to pcdc_dev on: schedule: - - cron: '0 0 * * *' # every day at midnight UTC + - cron: '*/10 * * * *' # every day at midnight UTC workflow_dispatch: jobs: From 1af7de4732cb44a7ea852d82206a88aa7c7c5c93 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 <80228075+paulmurdoch19@users.noreply.github.com> Date: Fri, 11 Jul 2025 15:30:45 -0700 Subject: [PATCH 055/126] Update sync-cdis-with-pcdc.yml --- .github/workflows/sync-cdis-with-pcdc.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index e608e6603..2dfb765d6 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -1,8 +1,6 @@ name: Sync upstream master to pcdc_dev on: - schedule: - - cron: '*/10 * * * *' # every day at midnight UTC workflow_dispatch: jobs: From 5a53acaea4949e0020715e2558d4f84af3a28e08 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 14 Jul 2025 12:29:25 -0700 Subject: [PATCH 056/126] updates --- helm/fence/values.yaml | 7 +- helm/revproxy/nginx/helpers.js | 139 +++++++++++++++++++-------------- helm/revproxy/nginx/nginx.conf | 6 +- helm/sheepdog/values.yaml | 2 +- 4 files changed, 89 insertions(+), 65 deletions(-) diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index d9e9854f2..9ffb15790 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -418,7 +418,6 @@ volumes: configMap: name: "fence-yaml-merge" optional: true - - name: amanuensis-jwt-keys secret: secretName: "amanuensis-jwt-keys" @@ -802,7 +801,7 @@ FENCE_CONFIG: # print(key) ENCRYPTION_KEY: REPLACEME - AMANUENSIS_PUBLIC_KEY_PATH: '/amanuensis/jwt_public_key.pem' + AMANUENSIS_PUBLIC_KEY_PATH: "/amanuensis/jwt_public_key.pem" # -- (map) Debug and security settings # Modify based on whether you're in a dev environment or in production @@ -859,7 +858,7 @@ FENCE_CONFIG: OPENID_CONNECT: # any OIDC IDP that does not differ from the generic implementation can be # configured without code changes - generic_oidc_idp: # choose a unique ID and replace this key + generic_oidc_idp: # choose a unique ID and replace this key # -- (str) Optional; display name for this IDP name: "some_idp" # -- (str) Client ID @@ -867,7 +866,7 @@ FENCE_CONFIG: # -- (str) Client secret client_secret: "" # -- (str) Redirect URL for this IDP - redirect_url: "{{BASE_URL}}/login/some_idp/login" # replace IDP name + redirect_url: "{{BASE_URL}}/login/some_idp/login" # replace IDP name # use `discovery` to configure IDPs that do not expose a discovery # endpoint. 
One of `discovery_url` or `discovery` should be configured # -- (str) URL of the OIDC discovery endpoint for the IDP diff --git a/helm/revproxy/nginx/helpers.js b/helm/revproxy/nginx/helpers.js index 27b63aa69..586ea3a46 100644 --- a/helm/revproxy/nginx/helpers.js +++ b/helm/revproxy/nginx/helpers.js @@ -1,13 +1,13 @@ /** * This is a helper script used in the reverse proxy * Note that this is not technically javascript, but nginscript (or njs) - * See here for info: + * See here for info: * - http://nginx.org/en/docs/njs/ * - https://www.nginx.com/blog/introduction-nginscript/ */ /** global supporting atob polyfill below */ -var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='; +var chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; // default threshold for assigning a service to production // e.g. weight of 0 would mean all services are assigned to production var DEFAULT_WEIGHT = 0; @@ -17,20 +17,23 @@ var DEFAULT_WEIGHT = 0; * https://github.com/davidchambers/Base64.js/blob/master/base64.js */ function atob(input) { - var str = String(input).replace(/[=]+$/, ''); // #31: ExtendScript bad parse of /= + var str = String(input).replace(/[=]+$/, ""); // #31: ExtendScript bad parse of /= if (str.length % 4 == 1) { return input; } for ( // initialize result and counters - var bc = 0, bs, buffer, idx = 0, output = ''; + var bc = 0, bs, buffer, idx = 0, output = ""; // get next character - buffer = str.charAt(idx++); + (buffer = str.charAt(idx++)); // character found in table? initialize bit storage and add its ascii value; - ~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer, - // and if not first of each 4 characters, - // convert the first 8 bits to one ascii character - bc++ % 4) ? output += String.fromCharCode(255 & bs >> (-2 * bc & 6)) : 0 + ~buffer && + ((bs = bc % 4 ? bs * 64 + buffer : buffer), + // and if not first of each 4 characters, + // convert the first 8 bits to one ascii character + bc++ % 4) + ? (output += String.fromCharCode(255 & (bs >> ((-2 * bc) & 6)))) + : 0 ) { // try to find character in table (0-63, not found => -1) buffer = chars.indexOf(buffer); @@ -53,7 +56,9 @@ function userid(req, res) { if (token) { // note - raw token is secret, so do not expose in userid - var raw = atob((token.split('.')[1] || "").replace('-', '+').replace('_', '/')); + var raw = atob( + (token.split(".")[1] || "").replace("-", "+").replace("_", "/") + ); if (raw) { try { var data = JSON.parse(raw); @@ -73,7 +78,7 @@ function userid(req, res) { */ function MathAbs(x) { x = +x; - return (x > 0) ? x : 0 - x; + return x > 0 ? 
x : 0 - x; } /** @@ -83,9 +88,10 @@ function MathAbs(x) { * @param s - string to hash */ function simpleHash(s) { - var i, hash = 0; + var i, + hash = 0; for (i = 0; i < s.length; i++) { - hash += (s[i].charCodeAt() * (i+1)); + hash += s[i].charCodeAt() * (i + 1); } // mod 100 b/c we want a percentage range (ie 0-99) return MathAbs(hash) % 100; @@ -103,36 +109,36 @@ function simpleHash(s) { function selectRelease(hash_res, w) { // determine release by comparing hash val to service weight if (hash_res < parseInt(w)) { - return 'canary'; + return "canary"; } - return 'production'; + return "production"; } function getWeight(service, weights) { - if (typeof weights[service] === 'undefined') { - return weights['default']; + if (typeof weights[service] === "undefined") { + return weights["default"]; } return weights[service]; } function releasesObjToString(releases) { - var res = ''; + var res = ""; for (var service in releases) { if (releases.hasOwnProperty(service)) { - res = res + service + '.' + releases[service] + '&'; + res = res + service + "." + releases[service] + "&"; } } return res; } /** - * Checks cookie (dev_canaries or service_releases) + * Checks cookie (dev_canaries or service_releases) * for service release versions and assigns * release versions for services not in the cookie based * on hash value and the percent weight of the canary. * If the weight for a service is 0, it ignores the cookie * and sets the release to production. - * + * * @param req - nginx request object * @return a string of service assignments. E.g: * "fence.canary&sheepdog.production&" @@ -143,19 +149,27 @@ function getServiceReleases(req) { // developer override can force canary even when canary has // been deployed for general users by setting the canary weights to zero // - var devOverride= !!req.variables['cookie_dev_canaries']; - var release_cookie = req.variables['cookie_dev_canaries'] || req.variables['cookie_service_releases'] || ''; + var devOverride = !!req.variables["cookie_dev_canaries"]; + var release_cookie = + req.variables["cookie_dev_canaries"] || + req.variables["cookie_service_releases"] || + ""; // services to assign to a service (edit this if adding a new canary service) - var services = ['fence', 'fenceshib', 'sheepdog', 'indexd', 'peregrine']; + var services = ["fence", "fenceshib", "sheepdog", "indexd", "peregrine"]; // weights for services - if given a default weight, use it; else use the default weight from this file - var canary_weights = JSON.parse(req.variables['canary_percent_json']); - if (typeof canary_weights['default'] === 'undefined') { - canary_weights['default'] = DEFAULT_WEIGHT + var canary_weights = JSON.parse(req.variables["canary_percent_json"]); + if (typeof canary_weights["default"] === "undefined") { + canary_weights["default"] = DEFAULT_WEIGHT; } else { - canary_weights['default'] = parseInt(canary_weights['default']) + canary_weights["default"] = parseInt(canary_weights["default"]); } // the string to be hashed - var hash_str = ['app', req.variables['realip'], req.variables['http_user_agent'], req.variables['date_gmt']].join(); + var hash_str = [ + "app", + req.variables["realip"], + req.variables["http_user_agent"], + req.variables["date_gmt"], + ].join(); var hash_res = -1; // for each service: @@ -163,17 +177,20 @@ function getServiceReleases(req) { // else if it's in the cookie, use that release // else select one by hashing and comparing to weight var updated_releases = {}; - for (var i=0; i < services.length; i++) { + for (var i = 0; i < services.length; 
i++) { var service = services[i]; - var parsed_release = release_cookie.match(service+'\.(production|canary)'); - if ((!devOverride) && getWeight(service, canary_weights) === 0) { - updated_releases[service] = 'production'; + var parsed_release = release_cookie.match(service + ".(production|canary)"); + if (!devOverride && getWeight(service, canary_weights) === 0) { + updated_releases[service] = "production"; } else if (!parsed_release) { // if we haven't yet generated a hash value, do that now if (hash_res < 0) { hash_res = simpleHash(hash_str); } - updated_releases[service] = selectRelease(hash_res, getWeight(service, canary_weights)); + updated_releases[service] = selectRelease( + hash_res, + getWeight(service, canary_weights) + ); } else { // append the matched values from the cookie updated_releases[service] = parsed_release[1]; @@ -204,24 +221,27 @@ function getServiceReleases(req) { * to not include this header */ function isCredentialsAllowed(req) { - if (!!req.variables['http_origin']) { - var origins = JSON.parse(req.variables['origins_allow_credentials'] || '[]') || []; + if (!!req.variables["http_origin"]) { + var origins = + JSON.parse(req.variables["origins_allow_credentials"] || "[]") || []; for (var i = 0; i < origins.length; i++) { // cannot use === to compare byte strings, whose "typeof" is also confusingly "string" - if (origins[i].fromUTF8().toLowerCase().trim() === - req.variables['http_origin'].fromUTF8().toLowerCase().trim()) { - return 'true'; + if ( + origins[i].fromUTF8().toLowerCase().trim() === + req.variables["http_origin"].fromUTF8().toLowerCase().trim() + ) { + return "true"; } } } - return ''; + return ""; } /** * Test whether the given ipAddrStr is in the global blackListStr. * Currently does not support CIDR format - just list of IP's - * - * @param {string} ipAddrStr + * + * @param {string} ipAddrStr * @param {string} blackListStr comma separated black list - defaults to globalBlackListStr (see below) * @return {boolean} true if ipAddrStr is in the black list */ @@ -232,19 +252,19 @@ function isOnBlackList(ipAddrStr, blackListStr) { /** * Call via nginx.conf js_set after setting the blackListStr and * ipAddrStr variables via set: - * + * * set blackListStr="whatever" * set ipAddrStr="whatever" * js_set blackListCheck checkBlackList - * + * * Note: kube-setup-revproxy generates gen3-blacklist.conf - which * gets sucked into the nginx.conf config - * - * @param {Request} req - * @param {Response} res + * + * @param {Request} req + * @param {Response} res * @return "ok" or "block" - fail to "ok" in ambiguous situation */ -function checkBlackList(req,res) { +function checkBlackList(req, res) { var ipAddrStr = req.variables["ip_addr_str"]; var blackListStr = req.variables["black_list_str"]; @@ -254,29 +274,34 @@ function checkBlackList(req,res) { return "ok"; // + "-" + ipAddrStr + "-" + blackListStr; } - /** * Handle the js_content callout from /workspace-authorize. * Basically - redirect to a subdomain /wts/authorize endpoint * based on the state=SUBDOMAIN-... query parameter with * some guards to stop attacks. - * - * @param {*} req - * @param {*} res + * + * @param {*} req + * @param {*} res */ function gen3_workspace_authorize_handler(req) { - var subdomain = ''; + var subdomain = ""; var query = req.variables["args"] || ""; var matchGroups = null; - if (matchGroups = query.match(/(^state=|&state=)(\w+)-/)) { + if ((matchGroups = query.match(/(^state=|&state=)(\w+)-/))) { subdomain = matchGroups[2]; - var location = "https://" + subdomain + "." 
+ req.variables["host"] + - "/wts/oauth2/authorize?" + query; + var location = + "https://" + + subdomain + + "." + + req.variables["host"] + + "/wts/oauth2/authorize?" + + query; req.return(302, location); } else { - req.headersOut["Content-Type"] = "application/json" + req.headersOut["Content-Type"] = "application/json"; req.return(400, '{ "status": "redirect failed validation" }'); } } +export default { userid, isCredentialsAllowed }; diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index 9e379ab6e..237a895d8 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -76,8 +76,8 @@ map $http_user_agent $loggable { # # Note - nginscript js_set, etc get processed # # on demand: https://www.nginx.com/blog/introduction-nginscript/ # # # - js_include helpers.js; - js_set $userid userid; + js_import helpers.js; + js_set $userid helpers.userid; perl_set $document_url_env 'sub { return $ENV{"DOCUMENT_URL"} || ""; }'; @@ -145,7 +145,7 @@ map $http_user_agent $loggable { # # CORS Credential White List # ## perl_set $origins_allow_credentials 'sub { return $ENV{"ORIGINS_ALLOW_CREDENTIALS"}; }'; - js_set $credentials_allowed isCredentialsAllowed; + js_set $credentials_allowed helpers.isCredentialsAllowed; # ## For multi-domain deployments perl_set $csrf_cookie_domain 'sub { return $ENV{"COOKIE_DOMAIN"} ? qq{;domain=$ENV{"COOKIE_DOMAIN"}} : ""; }'; diff --git a/helm/sheepdog/values.yaml b/helm/sheepdog/values.yaml index 90cfe2fe2..84fe044b9 100644 --- a/helm/sheepdog/values.yaml +++ b/helm/sheepdog/values.yaml @@ -108,7 +108,7 @@ postgresql: releaseLabel: production # -- (map) Annotations to add to the pod -podAnnotations: {"gen3.io/network-ingress": "sheepdog"} +podAnnotations: { "gen3.io/network-ingress": "sheepdog" } # -- (map) Configuration for autoscaling the number of replicas autoscaling: From 17860a81ad058a592079636c7dd47a015feeb5cb Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 11:29:09 -0700 Subject: [PATCH 057/126] fix git action cdis sync --- .github/workflows/sync-cdis-with-pcdc.yml | 62 +++++++++++++---------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 2dfb765d6..4c027af04 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -1,41 +1,51 @@ -name: Sync upstream master to pcdc_dev +name: Sync from Upstream on: - workflow_dispatch: + schedule: + # Temporarily trigger on push to your feature branch for testing + push: + branches: + - update-sync-github-action jobs: - sync-upstream: + sync-and-create-pr: runs-on: ubuntu-latest - steps: - - name: Checkout your repo + - name: Checkout Repository uses: actions/checkout@v4 with: - ref: pcdc_dev + # Fetch all history for all branches and tags fetch-depth: 0 - - name: Add upstream remote and fetch + - name: Set up Git run: | - git remote add upstream https://github.com/uc-cdis/gen3-helm.git - git fetch upstream master + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" - - name: Create sync branch + - name: Add upstream remote run: | - git checkout -B sync-upstream-master - git merge upstream/master --no-edit + # Replace with your actual upstream repository URL + git remote add upstream https://github.com/uc-cdis/gen3-helm.git - - name: Push sync branch - uses: ad-m/github-push-action@v0.8.0 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - 
branch: sync-upstream-master + - name: Fetch from all remotes + run: git fetch --all - - name: Create or update PR - uses: peter-evans/create-pull-request@v6 - with: - token: ${{ secrets.GITHUB_TOKEN }} - source_branch: sync-upstream-master - target_branch: pcdc_dev - title: "Sync upstream master to pcdc_dev" - body: "Automatic PR to sync latest changes from upstream master." - draft: false + - name: Create sync branch from pcdc_dev + run: | + # In case the branch exists from a previous run, delete it first + git push origin --delete sync-cdis-pcdc || true + # Create the new branch from the latest pcdc_dev + git checkout -b sync-cdis-pcdc origin/pcdc_dev + # Push the new branch to origin + git push -u origin sync-cdis-pcdc + + - name: Create Pull Request + run: | + gh pr create \ + --base sync-cdis-pcdc \ + --head upstream/master \ + --title "Sync: Upstream Master into sync-cdis-pcdc" \ + --body "This PR automatically syncs changes from upstream/master into the sync-cdis-pcdc branch." + env: + # The GITHUB_TOKEN is automatically created and provided by GitHub Actions + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 72341fa846cc965d22fcc9f876242e92eb95fe5f Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 11:30:04 -0700 Subject: [PATCH 058/126] fix on --- .github/workflows/sync-cdis-with-pcdc.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 4c027af04..b2290fc66 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -1,11 +1,9 @@ name: Sync from Upstream on: - schedule: - # Temporarily trigger on push to your feature branch for testing - push: - branches: - - update-sync-github-action + push: + branches: + - update-sync-github-action jobs: sync-and-create-pr: From b6a070998f62b562627f6d235f597e36ecffe904 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 11:32:27 -0700 Subject: [PATCH 059/126] fix permissions --- .github/workflows/sync-cdis-with-pcdc.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index b2290fc66..81e537935 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -8,6 +8,9 @@ on: jobs: sync-and-create-pr: runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write steps: - name: Checkout Repository uses: actions/checkout@v4 @@ -45,5 +48,4 @@ jobs: --title "Sync: Upstream Master into sync-cdis-pcdc" \ --body "This PR automatically syncs changes from upstream/master into the sync-cdis-pcdc branch." 
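# Note: in Actions the `gh` CLI authenticates from the GH_TOKEN or GITHUB_TOKEN
# environment variable, so the `env:` block below is what lets `gh pr create` run
# non-interactively.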
env: - # The GITHUB_TOKEN is automatically created and provided by GitHub Actions GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 2c3ff840e2035c076ccffb38b7e7f65b805cfdcd Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 11:42:46 -0700 Subject: [PATCH 060/126] fix pr portion --- .github/workflows/sync-cdis-with-pcdc.yml | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 81e537935..10c00d9b1 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -40,12 +40,14 @@ jobs: # Push the new branch to origin git push -u origin sync-cdis-pcdc - - name: Create Pull Request - run: | - gh pr create \ - --base sync-cdis-pcdc \ - --head upstream/master \ - --title "Sync: Upstream Master into sync-cdis-pcdc" \ - --body "This PR automatically syncs changes from upstream/master into the sync-cdis-pcdc branch." - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + - name: Create or update PR + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + source_branch: upstream/master + target_branch: sync-cdis-pcdc + title: "Sync upstream master to pcdc_dev" + body: "Automatic PR to sync latest changes from upstream master." + draft: false + # env: + # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 8fe53aeac5f3921761e399f9fff86434db0e2524 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 11:49:06 -0700 Subject: [PATCH 061/126] revert to old version --- .github/workflows/sync-cdis-with-pcdc.yml | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 10c00d9b1..75ca0e8c8 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -40,14 +40,13 @@ jobs: # Push the new branch to origin git push -u origin sync-cdis-pcdc - - name: Create or update PR - uses: peter-evans/create-pull-request@v6 - with: - token: ${{ secrets.GITHUB_TOKEN }} - source_branch: upstream/master - target_branch: sync-cdis-pcdc - title: "Sync upstream master to pcdc_dev" - body: "Automatic PR to sync latest changes from upstream master." - draft: false - # env: - # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + - name: Create Pull Request + run: | + gh pr create \ + --base sync-cdis-pcdc \ + --head upstream/master \ + --title "Sync: Upstream Master into sync-cdis-pcdc" \ + --body "This PR automatically syncs changes from upstream/master into the sync-cdis-pcdc branch." 
+ env: + # Use the PAT secret instead of the default token + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 3e9d4a813369ede1c7a770328157c19aa2eb20d5 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 11:59:08 -0700 Subject: [PATCH 062/126] update to deal with upstream --- .github/workflows/sync-cdis-with-pcdc.yml | 63 +++++++++++++++++------ 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 75ca0e8c8..c1f1ccc28 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -11,11 +11,11 @@ jobs: permissions: contents: write pull-requests: write + issues: write steps: - name: Checkout Repository uses: actions/checkout@v4 with: - # Fetch all history for all branches and tags fetch-depth: 0 - name: Set up Git @@ -23,30 +23,59 @@ jobs: git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" - - name: Add upstream remote + - name: Add upstream remote and fetch run: | - # Replace with your actual upstream repository URL git remote add upstream https://github.com/uc-cdis/gen3-helm.git + git fetch upstream master - - name: Fetch from all remotes - run: git fetch --all - - - name: Create sync branch from pcdc_dev + - name: Create and push sync branch run: | - # In case the branch exists from a previous run, delete it first git push origin --delete sync-cdis-pcdc || true - # Create the new branch from the latest pcdc_dev git checkout -b sync-cdis-pcdc origin/pcdc_dev - # Push the new branch to origin + + - name: Attempt merge + id: merge + run: | + if git merge upstream/master --no-edit; then + echo "merge_success=true" >> $GITHUB_OUTPUT + echo "✅ Merge successful" + else + echo "merge_success=false" >> $GITHUB_OUTPUT + echo "❌ Merge conflicts detected" + + # Get list of conflicted files + CONFLICTED_FILES=$(git diff --name-only --diff-filter=U) + echo "conflicted_files<> $GITHUB_OUTPUT + echo "$CONFLICTED_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + fi + + - name: Push successful merge + if: steps.merge.outputs.merge_success == 'true' + run: | git push -u origin sync-cdis-pcdc - - name: Create Pull Request + - name: Create Pull Request for successful merge + if: steps.merge.outputs.merge_success == 'true' run: | gh pr create \ - --base sync-cdis-pcdc \ - --head upstream/master \ - --title "Sync: Upstream Master into sync-cdis-pcdc" \ - --body "This PR automatically syncs changes from upstream/master into the sync-cdis-pcdc branch." + --base pcdc_dev \ + --head sync-cdis-pcdc \ + --title "Sync: Upstream Master into pcdc_dev" \ + --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch." 
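# Note: the conflict check added earlier in this patch uses
# `git diff --name-only --diff-filter=U`; the `U` filter selects paths left unmerged
# after a failed merge, i.e. the files that typically still contain conflict markers.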
env: - # Use the PAT secret instead of the default token - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Create conflict resolution branch + if: steps.merge.outputs.merge_success == 'false' + run: | + # Create timestamped branch name + TIMESTAMP=$(date -u +%Y%m%d-%H%M%S) + CONFLICT_BRANCH="conflict-resolution-${TIMESTAMP}" + + # Create and push conflict resolution branch with unresolved conflicts + git checkout -b "$CONFLICT_BRANCH" + git push -u origin "$CONFLICT_BRANCH" + + echo "CONFLICT_BRANCH=${CONFLICT_BRANCH}" >> $GITHUB_ENV + echo "Created conflict resolution branch: $CONFLICT_BRANCH" \ No newline at end of file From 08f8cb0c4bbb812ff7ea7d0a3f1ccc081c28e2dc Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 12:04:02 -0700 Subject: [PATCH 063/126] update permissions --- .github/workflows/sync-cdis-with-pcdc.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index c1f1ccc28..c417c3175 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -12,6 +12,8 @@ jobs: contents: write pull-requests: write issues: write + actions: read + metadata: read # Add this line steps: - name: Checkout Repository uses: actions/checkout@v4 From 11c042d67582c0aa2e075c8c19774c701889813b Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 12:09:06 -0700 Subject: [PATCH 064/126] update permissions again --- .github/workflows/sync-cdis-with-pcdc.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index c417c3175..1d2be9879 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -13,7 +13,6 @@ jobs: pull-requests: write issues: write actions: read - metadata: read # Add this line steps: - name: Checkout Repository uses: actions/checkout@v4 From c57c5b7555fbf495e7dfc893b4eea1481cb591b6 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 12:17:04 -0700 Subject: [PATCH 065/126] update token --- .github/workflows/sync-cdis-with-pcdc.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 1d2be9879..bd512ef06 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -4,6 +4,7 @@ on: push: branches: - update-sync-github-action + workflow_dispatch: jobs: sync-and-create-pr: @@ -18,11 +19,12 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + token: ${{ secrets.PAT_TOKEN }} # Use PAT for checkout in forks - name: Set up Git run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" + git config user.name "paulmurdoch19" + git config user.email "paulmurdoch19@users.noreply.github.com" - name: Add upstream remote and fetch run: | @@ -65,7 +67,7 @@ jobs: --title "Sync: Upstream Master into pcdc_dev" \ --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch." 
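# Note: pull requests created with the workflow's default GITHUB_TOKEN do not trigger
# other workflows (such as CI on the new PR), which is the usual motivation for swapping
# in a personal access token like the PAT_TOKEN introduced just below.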
env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_TOKEN: ${{ secrets.PAT_TOKEN }} # Use PAT instead of GITHUB_TOKEN - name: Create conflict resolution branch if: steps.merge.outputs.merge_success == 'false' From 367a91c8ddb0be52e8f7b42b91cc3f82638b2924 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 12:23:01 -0700 Subject: [PATCH 066/126] updates --- .github/workflows/sync-cdis-with-pcdc.yml | 94 +++++++++++++++++++++-- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index bd512ef06..d0ef34b32 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -19,7 +19,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - token: ${{ secrets.PAT_TOKEN }} # Use PAT for checkout in forks + token: ${{ secrets.PAT_TOKEN }} - name: Set up Git run: | @@ -30,18 +30,53 @@ jobs: run: | git remote add upstream https://github.com/uc-cdis/gen3-helm.git git fetch upstream master + git fetch origin # Ensure we have latest origin refs + + - name: Check if sync is needed + id: check_sync + run: | + # Get the latest commit hashes + UPSTREAM_SHA=$(git rev-parse upstream/master) + PCDC_SHA=$(git rev-parse origin/pcdc_dev) + + echo "Upstream SHA: $UPSTREAM_SHA" + echo "PCDC SHA: $PCDC_SHA" + + if [ "$UPSTREAM_SHA" = "$PCDC_SHA" ]; then + echo "sync_needed=false" >> $GITHUB_OUTPUT + echo "✅ No sync needed - branches are identical" + else + echo "sync_needed=true" >> $GITHUB_OUTPUT + echo "🔄 Sync needed - differences detected" + + # Check if there are commits ahead + COMMITS_AHEAD=$(git rev-list --count origin/pcdc_dev..upstream/master) + echo "Commits ahead: $COMMITS_AHEAD" + echo "commits_ahead=$COMMITS_AHEAD" >> $GITHUB_OUTPUT + fi - name: Create and push sync branch + if: steps.check_sync.outputs.sync_needed == 'true' run: | + # Delete existing sync branch if it exists git push origin --delete sync-cdis-pcdc || true + + # Create sync branch from pcdc_dev git checkout -b sync-cdis-pcdc origin/pcdc_dev + echo "Created sync-cdis-pcdc branch from pcdc_dev" - name: Attempt merge + if: steps.check_sync.outputs.sync_needed == 'true' id: merge run: | if git merge upstream/master --no-edit; then echo "merge_success=true" >> $GITHUB_OUTPUT echo "✅ Merge successful" + + # Verify the merge created changes + CHANGES=$(git diff --name-only HEAD~1 HEAD | wc -l) + echo "Files changed: $CHANGES" + echo "files_changed=$CHANGES" >> $GITHUB_OUTPUT else echo "merge_success=false" >> $GITHUB_OUTPUT echo "❌ Merge conflicts detected" @@ -54,23 +89,62 @@ jobs: fi - name: Push successful merge - if: steps.merge.outputs.merge_success == 'true' + if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' run: | git push -u origin sync-cdis-pcdc + echo "✅ Pushed sync-cdis-pcdc branch" + + - name: Wait for branch to be available + if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' + run: | + echo "Waiting for branch to be available on GitHub..." + sleep 10 + + # Verify branches exist on GitHub + echo "Checking if branches exist on GitHub..." 
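# Note: the `--body` string on the line above is missing its closing quote; PATCH 069
# ("fix quote") later in this series adds it.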
+ gh api repos/chicagopcdc/gen3-helm/branches/pcdc_dev --jq .name + gh api repos/chicagopcdc/gen3-helm/branches/sync-cdis-pcdc --jq .name + env: + GH_TOKEN: ${{ secrets.PAT_TOKEN }} + + - name: Check if PR already exists + if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' + id: check_pr + run: | + # Check if PR already exists + PR_COUNT=$(gh pr list --head sync-cdis-pcdc --base pcdc_dev --json number --jq length) + echo "Existing PRs: $PR_COUNT" + echo "pr_exists=$PR_COUNT" >> $GITHUB_OUTPUT + + if [ "$PR_COUNT" = "0" ]; then + echo "✅ No existing PR found - will create new one" + else + echo "⚠️ PR already exists - skipping creation" + gh pr list --head sync-cdis-pcdc --base pcdc_dev --json number,title,url --jq '.[] | "PR #\(.number): \(.title) - \(.url)"' + fi + env: + GH_TOKEN: ${{ secrets.PAT_TOKEN }} - name: Create Pull Request for successful merge - if: steps.merge.outputs.merge_success == 'true' + if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' && steps.check_pr.outputs.pr_exists == '0' run: | + echo "Creating PR with the following details:" + echo "Base: pcdc_dev" + echo "Head: sync-cdis-pcdc" + echo "Files changed: ${{ steps.merge.outputs.files_changed }}" + + # Create the PR with explicit repository context gh pr create \ + --repo chicagopcdc/gen3-helm \ --base pcdc_dev \ --head sync-cdis-pcdc \ --title "Sync: Upstream Master into pcdc_dev" \ - --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch." + --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch. env: - GH_TOKEN: ${{ secrets.PAT_TOKEN }} # Use PAT instead of GITHUB_TOKEN + GH_TOKEN: ${{ secrets.PAT_TOKEN }} - name: Create conflict resolution branch - if: steps.merge.outputs.merge_success == 'false' + if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'false' run: | # Create timestamped branch name TIMESTAMP=$(date -u +%Y%m%d-%H%M%S) @@ -81,4 +155,10 @@ jobs: git push -u origin "$CONFLICT_BRANCH" echo "CONFLICT_BRANCH=${CONFLICT_BRANCH}" >> $GITHUB_ENV - echo "Created conflict resolution branch: $CONFLICT_BRANCH" \ No newline at end of file + echo "Created conflict resolution branch: $CONFLICT_BRANCH" + + - name: No sync needed notification + if: steps.check_sync.outputs.sync_needed == 'false' + run: | + echo "✅ No sync needed - pcdc_dev is already up to date with upstream/master" + echo "Upstream and pcdc_dev branches are identical" \ No newline at end of file From dc59cb399267f3b04e51ff807018d11f27ceb3bf Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 14:47:11 -0700 Subject: [PATCH 067/126] updates --- .github/workflows/sync-cdis-with-pcdc.yml | 59 ++--------------------- 1 file changed, 5 insertions(+), 54 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index d0ef34b32..f5f5207f2 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -32,31 +32,7 @@ jobs: git fetch upstream master git fetch origin # Ensure we have latest origin refs - - name: Check if sync is needed - id: check_sync - run: | - # Get the latest commit hashes - UPSTREAM_SHA=$(git rev-parse upstream/master) - PCDC_SHA=$(git rev-parse origin/pcdc_dev) - - echo "Upstream SHA: $UPSTREAM_SHA" - echo "PCDC SHA: $PCDC_SHA" - - if [ "$UPSTREAM_SHA" = "$PCDC_SHA" ]; then - echo "sync_needed=false" >> $GITHUB_OUTPUT - echo 
"✅ No sync needed - branches are identical" - else - echo "sync_needed=true" >> $GITHUB_OUTPUT - echo "🔄 Sync needed - differences detected" - - # Check if there are commits ahead - COMMITS_AHEAD=$(git rev-list --count origin/pcdc_dev..upstream/master) - echo "Commits ahead: $COMMITS_AHEAD" - echo "commits_ahead=$COMMITS_AHEAD" >> $GITHUB_OUTPUT - fi - - name: Create and push sync branch - if: steps.check_sync.outputs.sync_needed == 'true' run: | # Delete existing sync branch if it exists git push origin --delete sync-cdis-pcdc || true @@ -66,7 +42,6 @@ jobs: echo "Created sync-cdis-pcdc branch from pcdc_dev" - name: Attempt merge - if: steps.check_sync.outputs.sync_needed == 'true' id: merge run: | if git merge upstream/master --no-edit; then @@ -89,13 +64,13 @@ jobs: fi - name: Push successful merge - if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' + if: steps.merge.outputs.merge_success == 'true' run: | git push -u origin sync-cdis-pcdc echo "✅ Pushed sync-cdis-pcdc branch" - name: Wait for branch to be available - if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' + if: steps.merge.outputs.merge_success == 'true' run: | echo "Waiting for branch to be available on GitHub..." sleep 10 @@ -107,26 +82,8 @@ jobs: env: GH_TOKEN: ${{ secrets.PAT_TOKEN }} - - name: Check if PR already exists - if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' - id: check_pr - run: | - # Check if PR already exists - PR_COUNT=$(gh pr list --head sync-cdis-pcdc --base pcdc_dev --json number --jq length) - echo "Existing PRs: $PR_COUNT" - echo "pr_exists=$PR_COUNT" >> $GITHUB_OUTPUT - - if [ "$PR_COUNT" = "0" ]; then - echo "✅ No existing PR found - will create new one" - else - echo "⚠️ PR already exists - skipping creation" - gh pr list --head sync-cdis-pcdc --base pcdc_dev --json number,title,url --jq '.[] | "PR #\(.number): \(.title) - \(.url)"' - fi - env: - GH_TOKEN: ${{ secrets.PAT_TOKEN }} - - name: Create Pull Request for successful merge - if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'true' && steps.check_pr.outputs.pr_exists == '0' + if: steps.merge.outputs.merge_success == 'true' run: | echo "Creating PR with the following details:" echo "Base: pcdc_dev" @@ -144,7 +101,7 @@ jobs: GH_TOKEN: ${{ secrets.PAT_TOKEN }} - name: Create conflict resolution branch - if: steps.check_sync.outputs.sync_needed == 'true' && steps.merge.outputs.merge_success == 'false' + if: steps.merge.outputs.merge_success == 'false' run: | # Create timestamped branch name TIMESTAMP=$(date -u +%Y%m%d-%H%M%S) @@ -155,10 +112,4 @@ jobs: git push -u origin "$CONFLICT_BRANCH" echo "CONFLICT_BRANCH=${CONFLICT_BRANCH}" >> $GITHUB_ENV - echo "Created conflict resolution branch: $CONFLICT_BRANCH" - - - name: No sync needed notification - if: steps.check_sync.outputs.sync_needed == 'false' - run: | - echo "✅ No sync needed - pcdc_dev is already up to date with upstream/master" - echo "Upstream and pcdc_dev branches are identical" \ No newline at end of file + echo "Created conflict resolution branch: $CONFLICT_BRANCH" \ No newline at end of file From 0d6139c027f04e6f2b10ce656f92f552547a5d01 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 14:48:58 -0700 Subject: [PATCH 068/126] remove old line --- .github/workflows/sync-cdis-with-pcdc.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index f5f5207f2..83239e464 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -88,8 +88,7 @@ jobs: echo "Creating PR with the following details:" echo "Base: pcdc_dev" echo "Head: sync-cdis-pcdc" - echo "Files changed: ${{ steps.merge.outputs.files_changed }}" - + # Create the PR with explicit repository context gh pr create \ --repo chicagopcdc/gen3-helm \ From a65947dcb112d7b8b554f3dd05b9cba877ec69fc Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 14:51:45 -0700 Subject: [PATCH 069/126] fix quote --- .github/workflows/sync-cdis-with-pcdc.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 83239e464..0a03045b8 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -88,14 +88,14 @@ jobs: echo "Creating PR with the following details:" echo "Base: pcdc_dev" echo "Head: sync-cdis-pcdc" - + # Create the PR with explicit repository context gh pr create \ --repo chicagopcdc/gen3-helm \ --base pcdc_dev \ --head sync-cdis-pcdc \ --title "Sync: Upstream Master into pcdc_dev" \ - --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch. + --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch." env: GH_TOKEN: ${{ secrets.PAT_TOKEN }} From b2eeca0bc3ad01b47e9d58a3f9e8fb0bf4f6267b Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 16:29:41 -0700 Subject: [PATCH 070/126] update spacing --- pcdc-default-values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index dacb427e9..372433b6f 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -320,6 +320,7 @@ peregrine: repository: quay.io/pcdc/peregrine tag: "1.3.10" + portal: #enabled: false image: From df66d399da22659eaba17f40d742ccc4f158abb4 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 16:40:15 -0700 Subject: [PATCH 071/126] update check that there are no changes --- .github/workflows/sync-cdis-with-pcdc.yml | 31 ++++++++++++++++++++--- pcdc-default-values.yaml | 1 - 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 0a03045b8..ce04620cd 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -20,17 +20,40 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.PAT_TOKEN }} - - name: Set up Git run: | - git config user.name "paulmurdoch19" - git config user.email "paulmurdoch19@users.noreply.github.com" - + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + - name: Add upstream remote and fetch run: | git remote add upstream https://github.com/uc-cdis/gen3-helm.git git fetch upstream master git fetch origin # Ensure we have latest origin refs + + - name: Check if sync is needed + id: check_sync + run: | + # Get the latest commit hashes + UPSTREAM_SHA=$(git rev-parse upstream/master) + PCDC_SHA=$(git rev-parse origin/pcdc_dev) + + echo "Upstream SHA: $UPSTREAM_SHA" + echo "PCDC SHA: $PCDC_SHA" + + if [ "$UPSTREAM_SHA" = "$PCDC_SHA" ]; then + echo "sync_needed=false" >> $GITHUB_OUTPUT + echo "✅ No sync needed - branches are identical" + exit 0 + 
else + echo "sync_needed=true" >> $GITHUB_OUTPUT + echo "🔄 Sync needed - differences detected" + + # Check if there are commits ahead + COMMITS_AHEAD=$(git rev-list --count origin/pcdc_dev..upstream/master) + echo "Commits ahead: $COMMITS_AHEAD" + echo "commits_ahead=$COMMITS_AHEAD" >> $GITHUB_OUTPUT + fi - name: Create and push sync branch run: | diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 372433b6f..dacb427e9 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -320,7 +320,6 @@ peregrine: repository: quay.io/pcdc/peregrine tag: "1.3.10" - portal: #enabled: false image: From 5ce150045502448561fb119b54f40e3cbb7159c8 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 16:44:48 -0700 Subject: [PATCH 072/126] update check if PR needed --- .github/workflows/sync-cdis-with-pcdc.yml | 31 +++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index ce04620cd..03acdf094 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -49,13 +49,25 @@ jobs: echo "sync_needed=true" >> $GITHUB_OUTPUT echo "🔄 Sync needed - differences detected" - # Check if there are commits ahead + # Check if there are commits ahead (both directions) COMMITS_AHEAD=$(git rev-list --count origin/pcdc_dev..upstream/master) + COMMITS_BEHIND=$(git rev-list --count upstream/master..origin/pcdc_dev) + echo "Commits ahead: $COMMITS_AHEAD" + echo "Commits behind: $COMMITS_BEHIND" echo "commits_ahead=$COMMITS_AHEAD" >> $GITHUB_OUTPUT + echo "commits_behind=$COMMITS_BEHIND" >> $GITHUB_OUTPUT + + # Only proceed if there are actual commits to merge + if [ "$COMMITS_AHEAD" -eq 0 ] && [ "$COMMITS_BEHIND" -eq 0 ]; then + echo "No commits to merge - branches may have diverged" + echo "sync_needed=false" >> $GITHUB_OUTPUT + exit 0 + fi fi - name: Create and push sync branch + if: steps.check_sync.outputs.sync_needed == 'true' run: | # Delete existing sync branch if it exists git push origin --delete sync-cdis-pcdc || true @@ -65,9 +77,17 @@ jobs: echo "Created sync-cdis-pcdc branch from pcdc_dev" - name: Attempt merge + if: steps.check_sync.outputs.sync_needed == 'true' id: merge run: | if git merge upstream/master --no-edit; then + # Check if the merge actually created any changes + if git diff --quiet HEAD~1 HEAD; then + echo "merge_success=false" >> $GITHUB_OUTPUT + echo "❌ No changes after merge - branches are equivalent" + exit 0 + fi + echo "merge_success=true" >> $GITHUB_OUTPUT echo "✅ Merge successful" @@ -111,6 +131,13 @@ jobs: echo "Creating PR with the following details:" echo "Base: pcdc_dev" echo "Head: sync-cdis-pcdc" + echo "Files changed: ${{ steps.merge.outputs.files_changed }}" + + # Check if PR already exists + if gh pr list --base pcdc_dev --head sync-cdis-pcdc --json number | jq -e '.[0]' > /dev/null; then + echo "PR already exists, skipping creation" + exit 0 + fi # Create the PR with explicit repository context gh pr create \ @@ -118,7 +145,7 @@ jobs: --base pcdc_dev \ --head sync-cdis-pcdc \ --title "Sync: Upstream Master into pcdc_dev" \ - --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch." + --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch. 
Files changed: ${{ steps.merge.outputs.files_changed }}" env: GH_TOKEN: ${{ secrets.PAT_TOKEN }} From f922713488cd6e9d5964ca4a92d18e2da1e572bc Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 16:46:39 -0700 Subject: [PATCH 073/126] attempt fix again --- .github/workflows/sync-cdis-with-pcdc.yml | 24 ++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 03acdf094..e4cab635a 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -80,21 +80,35 @@ jobs: if: steps.check_sync.outputs.sync_needed == 'true' id: merge run: | + # Store the current HEAD before merge + BEFORE_MERGE=$(git rev-parse HEAD) + if git merge upstream/master --no-edit; then + # Get the HEAD after merge + AFTER_MERGE=$(git rev-parse HEAD) + # Check if the merge actually created any changes - if git diff --quiet HEAD~1 HEAD; then + if [ "$BEFORE_MERGE" = "$AFTER_MERGE" ]; then echo "merge_success=false" >> $GITHUB_OUTPUT - echo "❌ No changes after merge - branches are equivalent" + echo "❌ No changes after merge - branches are already up to date" + echo "files_changed=0" >> $GITHUB_OUTPUT exit 0 fi echo "merge_success=true" >> $GITHUB_OUTPUT - echo "✅ Merge successful" + echo "✅ Merge successful with changes" - # Verify the merge created changes - CHANGES=$(git diff --name-only HEAD~1 HEAD | wc -l) + # Count actual changes from the merge + CHANGES=$(git diff --name-only $BEFORE_MERGE $AFTER_MERGE | wc -l) echo "Files changed: $CHANGES" echo "files_changed=$CHANGES" >> $GITHUB_OUTPUT + + # Verify we have actual changes before proceeding + if [ "$CHANGES" -eq 0 ]; then + echo "merge_success=false" >> $GITHUB_OUTPUT + echo "❌ No file changes detected after merge" + exit 0 + fi else echo "merge_success=false" >> $GITHUB_OUTPUT echo "❌ Merge conflicts detected" From 903e441bb613c53a8d914e8c4a562b381e7013b0 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 17:02:45 -0700 Subject: [PATCH 074/126] changes to handle no changes --- .github/workflows/sync-cdis-with-pcdc.yml | 62 ++++------------------- 1 file changed, 10 insertions(+), 52 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index e4cab635a..71ef2836f 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -30,44 +30,8 @@ jobs: git remote add upstream https://github.com/uc-cdis/gen3-helm.git git fetch upstream master git fetch origin # Ensure we have latest origin refs - - - name: Check if sync is needed - id: check_sync - run: | - # Get the latest commit hashes - UPSTREAM_SHA=$(git rev-parse upstream/master) - PCDC_SHA=$(git rev-parse origin/pcdc_dev) - - echo "Upstream SHA: $UPSTREAM_SHA" - echo "PCDC SHA: $PCDC_SHA" - - if [ "$UPSTREAM_SHA" = "$PCDC_SHA" ]; then - echo "sync_needed=false" >> $GITHUB_OUTPUT - echo "✅ No sync needed - branches are identical" - exit 0 - else - echo "sync_needed=true" >> $GITHUB_OUTPUT - echo "🔄 Sync needed - differences detected" - - # Check if there are commits ahead (both directions) - COMMITS_AHEAD=$(git rev-list --count origin/pcdc_dev..upstream/master) - COMMITS_BEHIND=$(git rev-list --count upstream/master..origin/pcdc_dev) - - echo "Commits ahead: $COMMITS_AHEAD" - echo "Commits behind: $COMMITS_BEHIND" - echo "commits_ahead=$COMMITS_AHEAD" >> $GITHUB_OUTPUT - echo "commits_behind=$COMMITS_BEHIND" >> $GITHUB_OUTPUT - 
- # Only proceed if there are actual commits to merge - if [ "$COMMITS_AHEAD" -eq 0 ] && [ "$COMMITS_BEHIND" -eq 0 ]; then - echo "No commits to merge - branches may have diverged" - echo "sync_needed=false" >> $GITHUB_OUTPUT - exit 0 - fi - fi - name: Create and push sync branch - if: steps.check_sync.outputs.sync_needed == 'true' run: | # Delete existing sync branch if it exists git push origin --delete sync-cdis-pcdc || true @@ -77,12 +41,12 @@ jobs: echo "Created sync-cdis-pcdc branch from pcdc_dev" - name: Attempt merge - if: steps.check_sync.outputs.sync_needed == 'true' id: merge run: | # Store the current HEAD before merge BEFORE_MERGE=$(git rev-parse HEAD) - + echo "conflict_branch=false" >> $GITHUB_OUTPUT + if git merge upstream/master --no-edit; then # Get the HEAD after merge AFTER_MERGE=$(git rev-parse HEAD) @@ -91,17 +55,12 @@ jobs: if [ "$BEFORE_MERGE" = "$AFTER_MERGE" ]; then echo "merge_success=false" >> $GITHUB_OUTPUT echo "❌ No changes after merge - branches are already up to date" - echo "files_changed=0" >> $GITHUB_OUTPUT exit 0 fi - - echo "merge_success=true" >> $GITHUB_OUTPUT - echo "✅ Merge successful with changes" - + # Count actual changes from the merge CHANGES=$(git diff --name-only $BEFORE_MERGE $AFTER_MERGE | wc -l) - echo "Files changed: $CHANGES" - echo "files_changed=$CHANGES" >> $GITHUB_OUTPUT + # Verify we have actual changes before proceeding if [ "$CHANGES" -eq 0 ]; then @@ -109,15 +68,14 @@ jobs: echo "❌ No file changes detected after merge" exit 0 fi + + echo "merge_success=true" >> $GITHUB_OUTPUT + echo "✅ Merge successful with changes" + else echo "merge_success=false" >> $GITHUB_OUTPUT + echo "conflict_branch=true" >> $GITHUB_OUTPUT echo "❌ Merge conflicts detected" - - # Get list of conflicted files - CONFLICTED_FILES=$(git diff --name-only --diff-filter=U) - echo "conflicted_files<> $GITHUB_OUTPUT - echo "$CONFLICTED_FILES" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT fi - name: Push successful merge @@ -164,7 +122,7 @@ jobs: GH_TOKEN: ${{ secrets.PAT_TOKEN }} - name: Create conflict resolution branch - if: steps.merge.outputs.merge_success == 'false' + if: steps.merge.outputs.merge_success == 'false' && steps.merge.outputs.conflict_branch == 'true' run: | # Create timestamped branch name TIMESTAMP=$(date -u +%Y%m%d-%H%M%S) From af7c86cdf0044c83e3048e224b438599ada51a02 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 15 Jul 2025 17:03:58 -0700 Subject: [PATCH 075/126] update with date --- .github/workflows/sync-cdis-with-pcdc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index 71ef2836f..bc349fb89 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -116,7 +116,7 @@ jobs: --repo chicagopcdc/gen3-helm \ --base pcdc_dev \ --head sync-cdis-pcdc \ - --title "Sync: Upstream Master into pcdc_dev" \ + --title "Sync: Upstream Master into pcdc_dev ($(date -u +%Y-%m-%d))" \ --body "This PR automatically syncs changes from upstream/master into the pcdc_dev branch. 
Files changed: ${{ steps.merge.outputs.files_changed }}" env: GH_TOKEN: ${{ secrets.PAT_TOKEN }} From a1e60e91e8b70bb98e4a67b7abce50fa2a2ec1eb Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 16 Jul 2025 11:03:34 -0700 Subject: [PATCH 076/126] add schedule every monday --- .github/workflows/sync-cdis-with-pcdc.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sync-cdis-with-pcdc.yml b/.github/workflows/sync-cdis-with-pcdc.yml index bc349fb89..6a1d3a686 100644 --- a/.github/workflows/sync-cdis-with-pcdc.yml +++ b/.github/workflows/sync-cdis-with-pcdc.yml @@ -1,9 +1,8 @@ name: Sync from Upstream on: - push: - branches: - - update-sync-github-action + schedule: + - cron: '0 6 * * 1' # Runs at 06:00 UTC every Monday workflow_dispatch: jobs: From abb89138217e5959ad0bbbce96548d716da1a66a Mon Sep 17 00:00:00 2001 From: pkellyc Date: Thu, 17 Jul 2025 10:04:43 -0500 Subject: [PATCH 077/126] revert gitops.json to match pcdc_dev, going a different route --- helm/portal/defaults/gitops.json | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index cc333cd9f..73fa42f9c 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -127,31 +127,13 @@ "consortium": { "chartType": "bar", "title": "Consortium" - }, - "external_references.external_resource_name": { - "chartType": "bar", - "title": "External Resource Name", - "show": false } }, "filters": { "anchor": { "field": "disease_phase", - "options": [ - "Initial Diagnosis", - "Relapse" - ], - "tabs": [ - "Disease", - "Molecular", - "Surgery", - "Radiation", - "Response", - "SMN", - "Imaging", - "Labs", - "SCT" - ] + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] }, "tabs": [ { @@ -448,7 +430,7 @@ { "field": "tumor_assessments.tumor_laterality", "name": "Tumor Laterality" - }, + }, { "field": "stagings.irs_group", "name": "IRS Group" From c8dc64df13f61dc6df76cc0eb37351e58de37abb Mon Sep 17 00:00:00 2001 From: pkellyc Date: Thu, 17 Jul 2025 10:06:54 -0500 Subject: [PATCH 078/126] revert values.yaml to match pcdc_dev, going a different route --- helm/etl/values.yaml | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/helm/etl/values.yaml b/helm/etl/values.yaml index 6372fb678..ef75b7fc7 100644 --- a/helm/etl/values.yaml +++ b/helm/etl/values.yaml @@ -5,16 +5,17 @@ image: # -- (string) The Docker image repository for the fence service repository: quay.io/cdis/tube # -- (string) When to pull the image. This value should be "Always" to ensure the latest image is used. - pullPolicy: Always + pullPolicy: IfNotPresent # -- (string) Overrides the image tag whose default is the chart appVersion. tag: "master" spark: # -- (string) The Docker image repository for the spark service repository: quay.io/cdis/gen3-spark # -- (string) When to pull the image. This value should be "Always" to ensure the latest image is used. - pullPolicy: Always + pullPolicy: IfNotPresent # -- (string) Overrides the image tag whose default is the chart appVersion. - tag: "2024.11" + tag: "master" + # -- (list) Docker image pull secrets. 
imagePullSecrets: [] @@ -22,23 +23,21 @@ imagePullSecrets: [] # -- (map) Annotations to add to the pod podAnnotations: {} + # -- (map) Resource requests and limits for the containers in the pod resources: tube: # -- (map) The amount of resources that the container requests requests: - # -- (string) The amount of CPU requested - cpu: 0.3 # -- (string) The amount of memory requested memory: 128Mi spark: # -- (map) The amount of resources that the container requests requests: - # -- (string) The amount of CPU requested - cpu: 0.3 # -- (string) The amount of memory requested memory: 128Mi + esEndpoint: gen3-elasticsearch-master etlMapping: @@ -131,14 +130,6 @@ etlMapping: target_nodes: - name: slide_image path: slides.samples.cases - - name: subject - doc_type: subject - type: aggregator - root: subject - props: - - name: submitter_id - - name: project_id - - name: external_references # -- (map) Configuration options for es garbage cronjob. esGarbageCollect: From 1e70ee30f9869b555b47c8bc73826c998e7e96a9 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 17 Jul 2025 16:57:09 -0700 Subject: [PATCH 079/126] add amanuensis hooks --- .../amanuensis-clear-filter-set-cronjob.yaml | 14 +++ .../templates/amanuensis-db-migrate-job.yaml | 14 +++ .../templates/amanuensis-secret.yaml | 5 ++ .../templates/amanuensis-secrets.yaml | 27 +++++- .../amanuensis-validate-filter-sets-job.yaml | 10 +++ helm/common/templates/_db_setup_job.tpl | 5 ++ helm/fence/values.yaml | 2 +- .../templates/cleanup-helm-hooks-job.yaml | 87 +++++++++++++++++++ pcdc-default-values.yaml | 4 +- tools/roll.sh | 2 +- 10 files changed, 165 insertions(+), 5 deletions(-) create mode 100644 helm/gen3/templates/cleanup-helm-hooks-job.yaml diff --git a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml index 6f8b48958..6d05e63c9 100644 --- a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml +++ b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml @@ -4,6 +4,12 @@ metadata: name: amanuensis-clear-unused-filter-sets labels: redeploy-hash: "{{ .Release.Revision }}" + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation + labels: + app: gen3-created-by-hook spec: schedule: "0 0 1 * *" concurrencyPolicy: Forbid @@ -26,9 +32,17 @@ spec: - name: config-volume secret: secretName: "amanuensis-config" + items: + - key: amanuensis-config.yaml + path: amanuensis-config.yaml + optional: false - name: amanuensis-volume secret: secretName: "amanuensis-creds" + items: + - key: creds.json + path: creds.json + optional: false - name: tmp-pod emptyDir: {} containers: diff --git a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml index 584cb509a..abf55a8d3 100644 --- a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml +++ b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml @@ -3,6 +3,12 @@ apiVersion: batch/v1 kind: Job metadata: name: amanuensis-db-migrate + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": before-hook-creation + labels: + app: gen3-created-by-hook spec: template: metadata: @@ -18,9 +24,17 @@ spec: - name: config-volume secret: secretName: "amanuensis-config" + items: + - key: amanuensis-config.yaml + path: amanuensis-config.yaml + optional: false - name: amanuensis-volume secret: secretName: 
"amanuensis-creds" + items: + - key: creds.json + path: creds.json + optional: false - name: tmp-pod emptyDir: {} containers: diff --git a/helm/amanuensis/templates/amanuensis-secret.yaml b/helm/amanuensis/templates/amanuensis-secret.yaml index c7ae2e52b..ed9076b5d 100644 --- a/helm/amanuensis/templates/amanuensis-secret.yaml +++ b/helm/amanuensis/templates/amanuensis-secret.yaml @@ -2,6 +2,11 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-secret + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" + labels: + app: gen3-created-by-hook type: Opaque data: {{ (.Files.Glob "amanuensis-secret/*").AsSecrets | indent 2 }} diff --git a/helm/amanuensis/templates/amanuensis-secrets.yaml b/helm/amanuensis/templates/amanuensis-secrets.yaml index dd1eb4719..8d15a44db 100644 --- a/helm/amanuensis/templates/amanuensis-secrets.yaml +++ b/helm/amanuensis/templates/amanuensis-secrets.yaml @@ -2,11 +2,17 @@ apiVersion: v1 kind: ServiceAccount metadata: name: amanuensis-jobs + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-3" --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: amanuensis-jobs-role + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-3" rules: - apiGroups: [""] resources: ["secrets"] @@ -16,6 +22,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: amanuensis-jobs-role-binding + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-3" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -29,6 +38,11 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-config + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-2" + labels: + app: gen3-created-by-hook data: {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-config")) }} {{- if and $existingSecret $existingSecret.data (hasKey $existingSecret.data "amanuensis-config.yaml") }} @@ -41,6 +55,11 @@ apiVersion: v1 kind: Secret metadata: name: amanuensis-creds + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-2" + labels: + app: gen3-created-by-hook data: {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-creds")) }} {{- if and $existingSecret $existingSecret.data (hasKey $existingSecret.data "creds.json") }} @@ -63,7 +82,13 @@ data: apiVersion: batch/v1 kind: Job metadata: - name: amanuensis-secrets-{{ .Release.Revision }} + name: amanuensis-secrets + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" + "helm.sh/hook-delete-policy": before-hook-creation + labels: + app: gen3-created-by-hook spec: backoffLimit: 0 template: diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml index 7d2766f53..8508205f8 100644 --- a/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml +++ b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml @@ -4,6 +4,12 @@ metadata: name: amanuensis-validate-filter-sets labels: redeploy-hash: "{{ .Release.Revision }}" + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation + labels: + app: gen3-created-by-hook spec: # Job spec starts here directly (no schedule or jobTemplate needed) template: @@ -16,6 +22,10 @@ spec: - name: config-volume secret: secretName: 
"amanuensis-config" + items: + - key: amanuensis-config.yaml + path: amanuensis-config.yaml + optional: false - name: es-dd-config-volume emptyDir: {} - name: portal-config diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 872dcc565..4239c33b8 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -174,6 +174,11 @@ apiVersion: v1 kind: Secret metadata: name: {{ $.Chart.Name }}-dbcreds + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-5" + labels: + app: gen3-created-by-hook data: {{- $existingSecret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-dbcreds" .Chart.Name)) }} {{- if $existingSecret }} diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index 9ffb15790..458d3d7bf 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -428,7 +428,7 @@ volumes: - name: config-volume-public configMap: name: "manifest-fence" - optional: true + optional: true # -- (list) Volumes to mount to the container. volumeMounts: diff --git a/helm/gen3/templates/cleanup-helm-hooks-job.yaml b/helm/gen3/templates/cleanup-helm-hooks-job.yaml new file mode 100644 index 000000000..1cd13b47e --- /dev/null +++ b/helm/gen3/templates/cleanup-helm-hooks-job.yaml @@ -0,0 +1,87 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "gen3.fullname" . }}-cleanup + namespace: {{ .Release.Namespace }} + labels: + {{- include "gen3.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-20" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +automountServiceAccountToken: true + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "gen3.fullname" . }}-cleanup-role + namespace: {{ .Release.Namespace }} + labels: + {{- include "gen3.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-20" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +rules: +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "delete"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "delete"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "delete"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "gen3.fullname" . }}-cleanup-rolebinding + namespace: {{ .Release.Namespace }} + labels: + {{- include "gen3.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-20" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +subjects: +- kind: ServiceAccount + name: {{ include "gen3.fullname" . }}-cleanup + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ include "gen3.fullname" . }}-cleanup-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "gen3.fullname" . }}-cleanup-{{ randAlphaNum 8 | lower }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-10" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + ttlSecondsAfterFinished: 60 + template: + spec: + restartPolicy: Never + serviceAccountName: {{ include "gen3.fullname" . 
}}-cleanup + containers: + - name: cleanup + image: bitnami/kubectl:latest + command: + - /bin/bash + - -c + - | + echo "Cleaning up hook resources for release: {{ .Release.Name }}" + + # Clean up jobs created by hooks + kubectl delete jobs -l app=gen3-created-by-hook + + # Clean up secrets created by hooks (if any) + kubectl delete secrets -l app=gen3-created-by-hook + echo "Cleanup completed" \ No newline at end of file diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index dacb427e9..894350554 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -63,7 +63,7 @@ amanuensis: enabled: true image: repository: "quay.io/pcdc/amanuensis" - tag: "2.24.0" + tag: "2.26.2" pullPolicy: IfNotPresent fence: @@ -335,7 +335,7 @@ portal: revproxy: image: repository: quay.io/cdis/nginx - tag: "1.17.6-ctds-1.0.1" + tag: "2025.07" sheepdog: image: diff --git a/tools/roll.sh b/tools/roll.sh index 36586b6d4..413245564 100755 --- a/tools/roll.sh +++ b/tools/roll.sh @@ -80,4 +80,4 @@ else fi # Run helm upgrade --install command -helm upgrade --install $project . -f ../../values.yaml \ No newline at end of file +helm upgrade --install $project . -f ../../values.yaml --timeout 15m0s \ No newline at end of file From bf64cbcd53983d2e49d5c943795351ad0f7a7093 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 18 Jul 2025 14:17:13 -0700 Subject: [PATCH 080/126] Update cleanup job and pcdcanalysistools Helm chart Expanded cleanup job permissions and logic to include cronjobs in addition to jobs. Updated pcdcanalysistools dependency on 'common' chart to version 0.1.20 and removed unused Flask-related environment variables from deployment template. --- helm/gen3/templates/cleanup-helm-hooks-job.yaml | 7 +++++-- helm/pcdcanalysistools/Chart.yaml | 2 +- helm/pcdcanalysistools/templates/deployment.yaml | 6 ------ 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/helm/gen3/templates/cleanup-helm-hooks-job.yaml b/helm/gen3/templates/cleanup-helm-hooks-job.yaml index 1cd13b47e..476dd6f16 100644 --- a/helm/gen3/templates/cleanup-helm-hooks-job.yaml +++ b/helm/gen3/templates/cleanup-helm-hooks-job.yaml @@ -26,7 +26,7 @@ metadata: "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded rules: - apiGroups: ["batch"] - resources: ["jobs"] + resources: ["jobs", "cronjobs"] verbs: ["get", "list", "delete"] - apiGroups: [""] resources: ["pods"] @@ -80,8 +80,11 @@ spec: echo "Cleaning up hook resources for release: {{ .Release.Name }}" # Clean up jobs created by hooks - kubectl delete jobs -l app=gen3-created-by-hook + kubectl delete jobs -l app=gen3-created-by-hook + # Clean up cronjobs created by hooks + kubectl delete cronjobs -l app=gen3-created-by-hook + # Clean up secrets created by hooks (if any) kubectl delete secrets -l app=gen3-created-by-hook echo "Cleanup completed" \ No newline at end of file diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 4d464aecb..832342e78 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.16 + version: 0.1.20 repository: file://../common diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index 22b2cc119..0c3937ac2 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -131,8 +131,6 @@ spec: name: manifest-global key: fence_url optional: true - - name: 
FLASK_SECRET_KEY - value: "TODO: FIX THIS!!!" - name: ARBORIST_URL value: http://arborist-service - name: AUTH_NAMESPACE @@ -141,10 +139,6 @@ spec: value: /etc/ssl/certs/ca-certificates.crt - name: GEN3_DEBUG value: "False" - - name: FLASK_ENV - value: development - - name: FLASK_APP - value: PcdcAnalysisTools.wsgi volumeMounts: - name: "config-volume" readOnly: true From ff424ae5ecad3bf489a71c54297b230cf8b11739 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 28 Jul 2025 15:06:10 -0700 Subject: [PATCH 081/126] Add TABLE_ONE config and update default values Introduced a new TABLE_ONE configuration section in settings.py for consortium and excluded variables. Updated pcdc-default-values.yaml with new global access level settings, image tags, and formatting improvements for consistency. --- .../pcdcanalysistools-secret/settings.py | 22 +++++++ pcdc-default-values.yaml | 61 ++++++++++--------- 2 files changed, 53 insertions(+), 30 deletions(-) diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py index 5518e99dc..75efccd80 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -111,6 +111,28 @@ } } +config['TABLE_ONE'] = { + 'consortium': ["INSTRuCT", "INRG", "MaGIC", "NODAL"], + 'excluded_variables': [ + { + 'label': 'Data Contributor', + 'field': 'data_contributor_id', + }, + { + 'label': 'Study', + 'field': 'studies.study_id', + }, + { + 'label': 'Treatment Arm', + 'field': 'studies.treatment_arm', + } + ], + + 'result': { + "enabled": True + } +} + config['EXTERNAL'] = { 'commons': [ { diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 894350554..2c4b85627 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -4,6 +4,8 @@ global: portalApp: pcdc dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json authz_entity_name: "subject" + tierAccessLevel: "granular" + tierAccessLimit: "5" tls: cert: | -----BEGIN CERTIFICATE----- @@ -55,7 +57,7 @@ global: -----END RSA PRIVATE KEY----- arborist: - image: + image: repository: quay.io/pcdc/arborist tag: "2025.01" @@ -66,17 +68,16 @@ amanuensis: tag: "2.26.2" pullPolicy: IfNotPresent -fence: +fence: FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true #fill in #AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' MOCK_GOOGLE_AUTH: true - mock_default_user: 'test@example.com' + mock_default_user: "test@example.com" #LOGIN_REDIRECT_WHITELIST: ["https://localhost:9443/", "http://localhost:9443/"] - - + image: repository: "quay.io/pcdc/fence" tag: "helm-test" @@ -186,7 +187,7 @@ fence: - /programs - /programs/pcdc - + roles: - id: 'file_uploader' description: 'can upload data files' @@ -294,13 +295,13 @@ fence: - privacy_policy - login_no_access - sower - guppy: enabled: true image: - repository: quay.io/pcdc/guppy - tag: 1.9.1 + repository: "guppy" + tag: "test" + pullPolicy: "Never" authFilterField: "auth_resource_path" manifestservice: @@ -313,7 +314,7 @@ pcdcanalysistools: enabled: true image: repository: quay.io/pcdc/pcdcanalysistools - tag: "1.8.9" + tag: "1.10.0" peregrine: image: @@ -322,14 +323,14 @@ peregrine: portal: #enabled: false - image: + image: repository: "quay.io/pcdc/windmill" - tag: "1.36.1" + tag: "1.41.0" pullPolicy: IfNotPresent resources: requests: cpu: 1.0 - gitops: + gitops: json: "" revproxy: @@ -359,20 +360,20 @@ sower: image: quay.io/pcdc/pelican:1.3.3_export 
pull_policy: Always env: - - name: DICTIONARY_URL - valueFrom: - configMapKeyRef: - name: manifest-global - key: dictionary_url - - name: GEN3_HOSTNAME - valueFrom: - configMapKeyRef: - name: manifest-global - key: hostname - - name: ROOT_NODE - value: subject - - name: OUTPUT_FILE_FORMAT - value: ZIP + - name: DICTIONARY_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: dictionary_url + - name: GEN3_HOSTNAME + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + - name: ROOT_NODE + value: subject + - name: OUTPUT_FILE_FORMAT + value: ZIP volumeMounts: - name: pelican-creds-volume readOnly: true @@ -382,7 +383,7 @@ sower: readOnly: true mountPath: "/peregrine-creds.json" subPath: creds.json - cpu-limit: '1' + cpu-limit: "1" memory-limit: 2Gi volumes: - name: pelican-creds-volume @@ -414,7 +415,7 @@ elasticsearch: esConfig: elasticsearch.yml: | # Here we can add elasticsearch config - + resources: requests: cpu: 0.5 @@ -462,4 +463,4 @@ gearbox-middleware: enabled: false cohort-middleware: - enabled: false \ No newline at end of file + enabled: false From e626c29a62e03b1d87abe65cf1bb43f1d459be7e Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Jul 2025 15:35:12 -0700 Subject: [PATCH 082/126] Refactor secret handling and update Helm chart configs Removed in-chart secret generation and patching for gearbox and sheepdog, shifting secret management outside the Helm charts. Updated environment variable handling and volume mounts to use externally managed secrets. Upgraded common chart dependency, improved sheepdog settings.py for better config sourcing, and made image/tag/pullPolicy values more explicit for several services. Also removed unused nginx server block and improved .gitignore formatting. --- .gitignore | 15 ++- gearbox-default-values.yaml | 38 ++++---- helm/gearbox/Chart.yaml | 14 +-- .../templates/create-gearbox-config.yaml | 77 --------------- helm/gearbox/templates/gearbox-secret.yaml | 26 ----- helm/gearbox/values.yaml | 94 +++++++++++-------- helm/revproxy/nginx/nginx.conf | 15 --- helm/sheepdog/sheepdog-secret/settings.py | 60 +++++++----- helm/sheepdog/templates/deployment.yaml | 13 ++- helm/sheepdog/templates/sheepdog-creds.yaml | 19 ---- helm/sheepdog/values.yaml | 8 +- helm/wts/templates/wts-oidc.yaml | 5 +- helm/wts/values.yaml | 9 ++ pcdc-default-values.yaml | 9 +- tools/roll.sh | 3 - 15 files changed, 155 insertions(+), 250 deletions(-) delete mode 100644 helm/gearbox/templates/create-gearbox-config.yaml delete mode 100644 helm/gearbox/templates/gearbox-secret.yaml delete mode 100644 helm/sheepdog/templates/sheepdog-creds.yaml diff --git a/.gitignore b/.gitignore index 4b362378a..803d8c1e1 100644 --- a/.gitignore +++ b/.gitignore @@ -8,11 +8,16 @@ _sample-*/ # --- Files --- Chart.lock -.DS_Store # macOS system file (usually ignored) -secret-values.yaml # Helm secrets/values file -.env # Environment variables -credentials.json # Instance generated service account or credentials +# macOS system file (usually ignored) +.DS_Store +# Helm secrets/values file +secret-values.yaml +# Environment variables +.env +# Instance generated service account or credentials +credentials.json temp.yaml -/values.yaml # Main Helm values file +# Main Helm values file +/values.yaml postgres.txt pcdc_data/external/external_reference.json # External reference data file for PCDC diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index ae39b0da3..3d129ac02 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ 
-55,18 +55,18 @@ global: -----END RSA PRIVATE KEY----- arborist: - image: + image: repository: quay.io/cdis/arborist tag: 2024.03 -fence: +fence: FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true #fill in - AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' + AMANUENSIS_PUBLIC_KEY_PATH: "/fence/keys/key/jwt_public_key.pem" MOCK_GOOGLE_AUTH: true - mock_default_user: 'test@example.com' + mock_default_user: "test@example.com" volumes: - name: old-config-volume secret: @@ -162,9 +162,9 @@ fence: readOnly: true mountPath: "/var/www/fence/fence-config-public.yaml" subPath: fence-config-public.yaml - + # -- (list) Volumes to mount to the init container. - + initVolumeMounts: - name: "config-volume" readOnly: true @@ -187,7 +187,6 @@ fence: mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" subPath: fence_google_storage_creds_secret.json - podSecurityContext: runAsNonRoot: true runAsUser: 1000 @@ -308,7 +307,7 @@ fence: - /programs - /programs/pcdc - + roles: - id: 'gearbox_user' permissions: @@ -424,21 +423,20 @@ fence: - login_no_access - sower - portal: - enabled: true - image: + enabled: false + image: repository: quay.io/pcdc/gearbox_fe tag: "dev" resources: requests: cpu: 1.0 - gitops: + gitops: json: | { "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" } - + gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" revproxy: @@ -450,12 +448,16 @@ revproxy: gearbox: enabled: true image: - repository: quay.io/pcdc/gearbox_be - tag: 1.3.0 - pullPolicy: Always + repository: gearbox-be + tag: "GEAR-488" + pullPolicy: Never + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 gearbox-middleware: - enabled: true + enabled: false image: repository: quay.io/pcdc/gearbox-middleware tag: "helm-test" @@ -521,4 +523,4 @@ hatchery: enabled: false cohort-middleware: - enabled: false \ No newline at end of file + enabled: false diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml index 379415e32..f4e1af41b 100644 --- a/helm/gearbox/Chart.yaml +++ b/helm/gearbox/Chart.yaml @@ -24,10 +24,10 @@ version: 0.1.0 appVersion: "1.16.0" dependencies: -- name: common - version: 0.1.11 - repository: file://../common -- name: postgresql - version: 11.9.13 - repository: "https://charts.bitnami.com/bitnami" - condition: postgres.separate \ No newline at end of file + - name: common + version: 0.1.20 + repository: file://../common + - name: postgresql + version: 11.9.13 + repository: "https://charts.bitnami.com/bitnami" + condition: postgres.separate diff --git a/helm/gearbox/templates/create-gearbox-config.yaml b/helm/gearbox/templates/create-gearbox-config.yaml deleted file mode 100644 index 776b03718..000000000 --- a/helm/gearbox/templates/create-gearbox-config.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# DB Setup ServiceAccount -# Needs to update/ create secrets to signal that db is ready for use. 
-apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Chart.Name }}-secret-patch-sa ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ .Chart.Name }}-secret-patch-role -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["*"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ .Chart.Name }}-secret-patch-rolebinding -subjects: -- kind: ServiceAccount - name: {{ .Chart.Name }}-secret-patch-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: {{ .Chart.Name }}-secret-patch-role - apiGroup: rbac.authorization.k8s.io ---- -{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-g3auto" }} -{{- $shouldRunJob := true }} -{{- if and $existingSecret (index $existingSecret.data "secretready") }} - {{- $shouldRunJob = false }} -{{- end }} - -{{- if $shouldRunJob }} -apiVersion: batch/v1 -kind: Job -metadata: - name: gearbox-g3auto-patch-{{ .Release.Revision }} -spec: - template: - metadata: - labels: - app: gen3job - spec: - serviceAccountName: {{ .Chart.Name }}-secret-patch-sa - containers: - - name: gearbox-g3auto-patch - image: bitnami/kubectl:latest - command: ["/bin/sh", "-c"] - args: - - | - while true; do - if kubectl get secret gearbox-dbcreds; then - echo "gearbox-dbcreds secret found" - password=$(kubectl get secret gearbox-dbcreds -o jsonpath="{.data.password}" | base64 --decode) - echo "Retrieved password from gearbox-dbcreds" - gearbox_env=$(kubectl get secret gearbox-g3auto -o jsonpath="{.data.gearbox\.env}" | base64 --decode) - echo "Current gearbox.env content: $gearbox_env" - updated_gearbox_env=$(echo "${gearbox_env}\nDB_PASSWORD=${password}") - echo "Updated gearbox.env content: $updated_gearbox_env" - encoded_gearbox_env=$(echo -n "$updated_gearbox_env" | base64 -w 0) - kubectl patch secret gearbox-g3auto -p "{\"data\":{\"gearbox.env\":\"${encoded_gearbox_env}\"}}" - echo "Patched gearbox-g3auto with updated gearbox.env" - kubectl patch secret gearbox-g3auto -p '{"data":{"secretready":"dHJ1ZQo="}}' - echo "Patched gearbox-g3auto with secretready" - break - else - echo "Waiting for gearbox-dbcreds secret to be created" - sleep 5 - fi - done - restartPolicy: Never -{{- end }} - - diff --git a/helm/gearbox/templates/gearbox-secret.yaml b/helm/gearbox/templates/gearbox-secret.yaml deleted file mode 100644 index be60699e1..000000000 --- a/helm/gearbox/templates/gearbox-secret.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: gearbox-g3auto -type: Opaque -stringData: - {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-g3auto" }} - {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} - base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ index $existingSecret.data "base64Authz.txt" | b64dec | quote }}{{ else }}{{ $randomPass | quote | b64enc }}{{ end }} - gearbox.env: | - DEBUG=0 - FORCE_ISSUER=True - USER_API="http://fence-service/" - ALLOWED_ISSUERS="http://fence-service/,https://localhost/user" - DUMMY_S3=True - DB_DATABASE={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_DATABASE=(.*)" | quote }}{{ else }}{{ ( $.Values.postgres.database | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }}{{ end }} - DB_HOST={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_HOST=(.*)" | quote 
}}{{ else }}{{ (printf "%s-%s" $.Release.Name "postgresql" ) }}{{ end }} - DB_USER={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_USER=(.*)" | quote }}{{ else }}{{ ( $.Values.postgres.username | default (printf "%s_%s" $.Chart.Name $.Release.Name) ) }}{{ end }} - ADMIN_LOGINS={{ if and $existingSecret (index $existingSecret.data "gearbox.env") }}{{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "ADMIN_LOGINS=(.*)" | quote }}{{ else }}{{ $randomPass }}{{ end }} - ENABLE_PHI=True - {{- if and $existingSecret (index $existingSecret.data "gearbox.env") (index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_PASSWORD=(.*)") }} - DB_PASSWORD={{ index $existingSecret.data "gearbox.env" | b64dec | regexFind "DB_PASSWORD=(.*)" | quote }} - {{- end }} - {{- if and $existingSecret (index $existingSecret.data "secretReady") }} - secretReady: {{ index $existingSecret.data "secretReady" | b64dec | quote }} - {{- end }} \ No newline at end of file diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index 6dc91e2fb..d0a4d1399 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -99,10 +99,12 @@ serviceAccount: podAnnotations: {} -podSecurityContext: {} +podSecurityContext: + {} # fsGroup: 2000 -securityContext: {} +securityContext: + {} # capabilities: # drop: # - ALL @@ -114,7 +116,7 @@ service: type: ClusterIP port: 80 -resources: +resources: requests: cpu: 0.4 memory: 512Mi @@ -142,11 +144,55 @@ env: value: http://esproxy-service:9200 - name: AWS_REGION value: "us-east-1" - - name: GB_SECRET_READY + - name: DEBUG + value: "0" + - name: FORCE_ISSUER + value: "True" + - name: USER_API + value: "http://fence-service/" + - name: ALLOWED_ISSUERS + value: "http://fence-service/,https://localhost/user" + - name: DUMMY_S3 + value: "True" + - name: ENABLE_PHI + value: "True" + - name: TESTING + value: "False" + - name: DB_DATABASE + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: database + optional: false + - name: DB_HOST + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: host + optional: false + - name: DB_USER + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: username + optional: false + - name: ADMIN_LOGINS valueFrom: secretKeyRef: name: gearbox-g3auto - key: secretready + key: base64Authz.txt + optional: true + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: password + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: dbcreated optional: false volumes: @@ -156,47 +202,13 @@ volumes: items: - key: jwt_public_key.pem path: jwt_public_key.pem - - - name: config-volume-g3auto - secret: - secretName: gearbox-g3auto - # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this - # is only available if a /gearbox directory exists. - - name: config-volume - secret: - secretName: gearbox-config - optional: true - # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this - # may not exist if the commons does not have any gearbox manifest configuration. 
- - name: config-manifest - configMap: - name: manifest-gearbox optional: true - + volumeMounts: - name: "gearbox-middleware-jwt-keys" readOnly: true mountPath: "/src/src/gearbox/keys/jwt_public_key.pem" subPath: jwt_public_key.pem - - name: config-volume-g3auto - readOnly: true - mountPath: /src/.env - subPath: gearbox.env - - name: config-volume - readOnly: true - mountPath: /aggregate_config.json - subPath: aggregate_config.json - - name: config-manifest - readOnly: true - mountPath: /gearbox.json - subPath: json - -initVolumeMounts: - - name: config-volume-g3auto - readOnly: true - mountPath: /src/.env - subPath: gearbox.env - # Values to determine the labels that are used for the deployment, pod, etc. # -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". @@ -216,4 +228,4 @@ datadogLogsInjection: true # -- (bool) If enabled, the Datadog Agent will collect profiling data for your application using the Continuous Profiler. This data can be used to identify performance bottlenecks and optimize your application. datadogProfilingEnabled: true # -- (int) A value between 0 and 1, that represents the percentage of requests that will be traced. For example, a value of 0.5 means that 50% of requests will be traced. -datadogTraceSampleRate: 1 \ No newline at end of file +datadogTraceSampleRate: 1 diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index 237a895d8..e3d422b56 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -173,21 +173,6 @@ map $http_user_agent $loggable { } } - server { - listen 9200; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - set $proxy_service "gen3-elasticsearch-master"; - set $upstream "http://gen3-elasticsearch-master.$namespace.svc.cluster.local"; # Updated this line - proxy_pass $upstream; - } - } - server { listen 80; diff --git a/helm/sheepdog/sheepdog-secret/settings.py b/helm/sheepdog/sheepdog-secret/settings.py index 4d1465429..9f7564af3 100644 --- a/helm/sheepdog/sheepdog-secret/settings.py +++ b/helm/sheepdog/sheepdog-secret/settings.py @@ -1,6 +1,7 @@ from sheepdog.api import app, app_init from os import environ -import confighelper +import os +import bin.confighelper as confighelper APP_NAME = "sheepdog" @@ -12,45 +13,58 @@ def load_json(file_name): conf_data = load_json("creds.json") config = app.config -config["AUTH"] = "https://auth.service.consul:5000/v3/" -config["AUTH_ADMIN_CREDS"] = None -config["INTERNAL_AUTH"] = None - # ARBORIST deprecated, replaced by ARBORIST_URL # ARBORIST_URL is initialized in app_init() directly config["ARBORIST"] = "http://arborist-service/" -# Signpost: deprecated, replaced by index client. 
-config["SIGNPOST"] = { - "host": environ.get("SIGNPOST_HOST") or "http://indexd-service", - "version": "v0", - "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), -} config["INDEX_CLIENT"] = { - "host": environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", + "host": os.environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", "version": "v0", - "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), + # The user should be "sheepdog", but for legacy reasons, we use "gdcapi" instead + "auth": ( + ( + environ.get("INDEXD_USER", "gdcapi"), + environ.get("INDEXD_PASS") + or conf_data.get("indexd_password", "{{indexd_password}}"), + ) + ), } -config["FAKE_AUTH"] = False + config["PSQLGRAPH"] = { - "host": environ.get("PGHOST"), - "user": environ.get("PGUSER"), - "password": environ.get("PGPASSWORD"), - "database": environ.get("PGDB"), + "host": conf_data.get("db_host", os.environ.get("PGHOST", "localhost")), + "user": conf_data.get("db_username", os.environ.get("PGUSER", "sheepdog")), + "password": conf_data.get("db_password", os.environ.get("PGPASSWORD", "sheepdog")), + "database": conf_data.get("db_database", os.environ.get("PGDB", "sheepdog")), } config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") -config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (environ.get("FENCE_DB_USER"), environ.get("FENCE_DB_PASS"), environ.get("FENCE_DB_HOST"), environ.get("FENCE_DB_DBNAME")) +fence_username = conf_data.get( + "fence_username", os.environ.get("FENCE_DB_USER", "fence") +) +fence_password = conf_data.get( + "fence_password", os.environ.get("FENCE_DB_PASS", "fence") +) +fence_host = conf_data.get("fence_host", os.environ.get("FENCE_DB_HOST", "localhost")) +fence_database = conf_data.get( + "fence_database", os.environ.get("FENCE_DB_DATABASE", "fence") +) +config["PSQL_USER_DB_CONNECTION"] = "postgresql://%s:%s@%s:5432/%s" % ( + fence_username, + fence_password, + fence_host, + fence_database, +) -config["BASE_URL"] = "https://%s/user" % conf_data["hostname"] # for use by authutils remove when authutils gets updated -config["USER_API"] = "http://fence-service/" # for use by authutils og: "https://%s/user" % conf_data["hostname"] +config["USER_API"] = "https://%s/user" % conf_data.get( + "hostname", os.environ.get("CONF_HOSTNAME", "localhost") +) # for use by authutils # use the USER_API URL instead of the public issuer URL to accquire JWT keys config["FORCE_ISSUER"] = True -config["DICTIONARY_URL"] = environ.get( +config["DICTIONARY_URL"] = os.environ.get( "DICTIONARY_URL", "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json", ) app_init(app) application = app -application.debug = environ.get("GEN3_DEBUG") == "True" \ No newline at end of file +application.debug = os.environ.get("GEN3_DEBUG") == "True" \ No newline at end of file diff --git a/helm/sheepdog/templates/deployment.yaml b/helm/sheepdog/templates/deployment.yaml index 294c70678..0831c0ee0 100644 --- a/helm/sheepdog/templates/deployment.yaml +++ b/helm/sheepdog/templates/deployment.yaml @@ -40,12 +40,9 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} volumes: - - name: sheepdog-volume - secret: - secretName: "sheepdog-creds" - name: config-volume secret: - secretName: sheepdog-secret + secretName: "sheepdog-secret" - name: config-helper configMap: name: config-helper @@ -202,6 +199,12 @@ spec: name: manifest-global key: public_datasets optional: true + - name: 
INDEXD_PASS + valueFrom: + secretKeyRef: + name: indexd-service-creds + key: sheepdog + optional: true - name: AUTHZ_ENTITY_NAME valueFrom: configMapKeyRef: @@ -210,7 +213,7 @@ spec: - name: GEN3_UWSGI_TIMEOUT value: "600" - name: DICTIONARY_URL - value: {{ .Values.global.dictionaryUrl }} + value: {{ include "sheepdog.dictionaryUrl" .}} {{- with .Values.indexdUrl }} - name: INDEX_CLIENT_HOST value: {{ . }} diff --git a/helm/sheepdog/templates/sheepdog-creds.yaml b/helm/sheepdog/templates/sheepdog-creds.yaml deleted file mode 100644 index 736e6db56..000000000 --- a/helm/sheepdog/templates/sheepdog-creds.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: sheepdog-creds -type: Opaque -stringData: - creds.json: |- - { - "db_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" $.Chart.Name "context" $) }}", - "db_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" $.Chart.Name "context" $) }}", - "db_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) }}", - "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" $.Chart.Name "context" $)}}", - "hostname": "{{ .Values.global.hostname }}", - "indexd_password": "", - "fence_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" "fence" "context" $) }}", - "fence_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" "fence" "context" $) }}", - "fence_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" "fence" "context" $) }}", - "fence_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" "fence" "context" $)}}" - } diff --git a/helm/sheepdog/values.yaml b/helm/sheepdog/values.yaml index 84fe044b9..6589fa91a 100644 --- a/helm/sheepdog/values.yaml +++ b/helm/sheepdog/values.yaml @@ -108,7 +108,7 @@ postgresql: releaseLabel: production # -- (map) Annotations to add to the pod -podAnnotations: { "gen3.io/network-ingress": "sheepdog" } +podAnnotations: {"gen3.io/network-ingress": "sheepdog"} # -- (map) Configuration for autoscaling the number of replicas autoscaling: @@ -193,10 +193,10 @@ volumeMounts: readOnly: true mountPath: "/var/www/sheepdog/settings.py" subPath: "settings.py" - - name: "sheepdog-volume" + - name: "config-volume" readOnly: true - mountPath: "/var/www/sheepdog/creds.json" - subPath: "creds.json" + mountPath: "sheepdog/bin/settings.py" + subPath: "settings.py" # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/helm/wts/templates/wts-oidc.yaml b/helm/wts/templates/wts-oidc.yaml index e6641b7a3..938ba902a 100644 --- a/helm/wts/templates/wts-oidc.yaml +++ b/helm/wts/templates/wts-oidc.yaml @@ -25,9 +25,8 @@ spec: args: ["while [ $(curl -sw '%{http_code}' http://fence-service -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for fence...'; done"] containers: - name: fence-client - # TODO: Make this configurable - image: "quay.io/pcdc/fence:helm-test" - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.fenceImage.repository }}:{{ .Values.fenceImage.tag }}" + imagePullPolicy: {{ .Values.fenceImage.pullPolicy }} # TODO: ADD RESOURCES # resources: command: ["/bin/bash"] diff --git a/helm/wts/values.yaml b/helm/wts/values.yaml index 1e0b27b65..1dd9e14fc 100644 --- a/helm/wts/values.yaml +++ b/helm/wts/values.yaml @@ -259,3 +259,12 @@ partOf: "Authentication" selectorLabels: # -- (map) Will completely override the commonLabels 
defined in the common chart's _label_setup.tpl commonLabels: + +# -- (map) Fence docker image information. +fenceImage: + # -- (string) Docker repository. + repository: quay.io/pcdc/fence + # -- (string) Docker pull policy. + pullPolicy: Always + # -- (string) Overrides the image tag whose default is the chart appVersion. + tag: "master" diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 2c4b85627..8574a2347 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -299,9 +299,9 @@ fence: guppy: enabled: true image: - repository: "guppy" - tag: "test" - pullPolicy: "Never" + repository: "quay.io/pcdc/guppy" + tag: "1.10.3" + pullPolicy: "IfNotPresent" authFilterField: "auth_resource_path" manifestservice: @@ -339,9 +339,10 @@ revproxy: tag: "2025.07" sheepdog: + dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json image: repository: quay.io/pcdc/sheepdog - tag: "1.5.10" + tag: "1.7.2" sower: enabled: false diff --git a/tools/roll.sh b/tools/roll.sh index 413245564..f76a626bf 100755 --- a/tools/roll.sh +++ b/tools/roll.sh @@ -65,9 +65,6 @@ if [ $# -gt 0 ]; then # Delete the deployment corresponding to the service name kubectl delete deployment ${service_name}-deployment - if [ "$service_name" = "gearbox" ]; then - kubectl delete job gearbox-g3auto-patch - fi done fi From cc9e5c175cec0f4342ca1629746dd7aa243fd0cf Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Jul 2025 15:42:02 -0700 Subject: [PATCH 083/126] Add fenceImage config and comment in nginx.conf Introduced a new fenceImage configuration under wts in pcdc-default-values.yaml to specify repository, tag, and pull policy. Added a clarifying comment for the /login location in nginx.conf, indicating its purpose for gearbox. --- helm/revproxy/nginx/nginx.conf | 2 +- pcdc-default-values.yaml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index e3d422b56..69c2b886d 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -176,7 +176,7 @@ map $http_user_agent $loggable { server { listen 80; - + # this is here for gearbox I believe location /login { try_files $uri /index.html; } diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 8574a2347..0754df1e2 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -400,6 +400,10 @@ wts: image: repository: quay.io/cdis/workspace-token-service tag: 2025.01 + fenceImage: + repository: "quay.io/pcdc/fence" + tag: "helm-test" + pullPolicy: Always postgresql: primary: From 92abeb7a02731a356df6182bfc136adc1987ea64 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Jul 2025 16:00:41 -0700 Subject: [PATCH 084/126] Update portal configuration for Disease Data Hub Replaces the PCDC-specific portal configuration with a new configuration for the Gen3 Disease Data Hub in helm/portal/defaults/gitops.json, including new subcommons, navigation, UI text, and explorer/dataset browser configs. Updates helm/portal/values.yaml to use a generic data commons configuration. Restores the original PCDC configuration in pcdc-default-values.yaml for backward compatibility. 
--- helm/portal/defaults/gitops.json | 881 ++++++++++--------------------- helm/portal/values.yaml | 280 +++++++++- pcdc-default-values.yaml | 668 ++++++++++++++++++++++- 3 files changed, 1229 insertions(+), 600 deletions(-) diff --git a/helm/portal/defaults/gitops.json b/helm/portal/defaults/gitops.json index 73fa42f9c..54408911c 100644 --- a/helm/portal/defaults/gitops.json +++ b/helm/portal/defaults/gitops.json @@ -1,666 +1,351 @@ { - "gaTrackingId": "undefined", + "subcommons": [ + { + "URL": "https://tb.diseasedatahub.org/", + "name": "TB" + }, + { + "URL": "https://aids.diseasedatahub.org/", + "name": "AIDS" + }, + { + "URL": "https://flu.diseasedatahub.org/", + "name": "FLU" + }, + { + "URL": "https://microbiome.diseasedatahub.org/", + "name": "Microbiome" + } + ], + "gaTrackingId": "UA-119127212-1", "graphql": { "boardCounts": [ - { - "graphql": "_person_count", - "name": "Person", - "plural": "Persons" - }, { "graphql": "_subject_count", "name": "Subject", "plural": "Subjects" + }, + { + "graphql": "_study_count", + "name": "Study", + "plural": "Studies" + }, + { + "graphql": "_summary_lab_result_count", + "name": "Lab record", + "plural": "Lab records" } ], "chartCounts": [ - { - "graphql": "_person_count", - "name": "Person" - }, { "graphql": "_subject_count", "name": "Subject" + }, + { + "graphql": "_study_count", + "name": "Study" } ], "projectDetails": "boardCounts" }, "components": { - "appName": "Pediatric Cancer Data Commons Portal", + "appName": "Gen3 Disease Data Hub", "index": { "introduction": { - "heading": "Pediatric Cancer Data Commons", - "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", - "link": "/submission" + "heading": "Gen3 Disease Data Hub Datasets", + "text": "The Gen3 Disease Data Hub hosts data related to infectious diseases and aims to make data findable, accessible, interoperable, and reusable (FAIR).", + "link": "/datasets" }, "buttons": [ { - "name": "Define Data Field", - "icon": "data-field-define", - "body": "The Pediatric Cancer Data Commons defines the data. 
Please study the dictionary before you start browsing.", - "link": "/DD", - "label": "Learn more" + "name": "TB Environment", + "icon": "data-explore", + "body": "Explore TB data.", + "external_link": "https://tb.diseasedatahub.org" }, { - "name": "Explore Data", + "name": "AIDS Environment", "icon": "data-explore", - "body": "The Exploration Page gives you insights and a clear overview under selected factors.", - "link": "/explorer", - "label": "Explore data" + "body": "Explore AIDS data.", + "external_link": "https://aids.diseasedatahub.org" + }, + { + "name": "Flu Environment", + "icon": "data-explore", + "body": "Explore influenza data.", + "external_link": "https://flu.diseasedatahub.org" + }, + { + "name": "Microbiome Environment", + "icon": "data-explore", + "body": "Explore data from a collection of open-access microbiome-related studies.", + "external_link": "https://microbiome.diseasedatahub.org" } - ], - "barChart": { - "showPercentage": true - } + ] }, "navigation": { "items": [ { - "icon": "dictionary", - "link": "/DD", + "icon": "query", + "link": "/datasets", "color": "#a2a2a2", - "name": "Dictionary" + "name": "Dataset Browser" }, { "icon": "exploration", "link": "/explorer", "color": "#a2a2a2", - "name": "Exploration" + "name": "Eco Explorer" } ] }, "topBar": { "items": [ { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/pcdc/", - "name": "About PCDC" - }, - { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/sponsors/", - "name": "Our Sponsors" + "link": "https://gen3.org/resources/user/", + "name": "Documentation" } ] }, "login": { - "title": "Pediatric Cancer Data Commons", - "subTitle": "Connect. Share. Cure.", - "text": "Welcome to the Pediatric Cancer Data Commons (PCDC), brought to you by Data for the Common Good (D4CG). Headquartered at the University of Chicago, D4CG works with international leaders to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources.\n\nThe PCDC harnesses pediatric, AYA, and adult cancer clinical data from around the world into a single unified platform, making it possible to explore and access data across multiple types of cancer. 
The PCDC Data Portal currently includes some of the world's largest sets of clinical data for pediatric neuroblastoma, soft tissue sarcoma, germ cell tumors, AML, and Hodgkin lymphoma, with the addition of more cancer types in progress.", + "title": "Gen3 Disease Data Hub", + "subTitle": "Cross Environment Datasets", + "text": "The website combines open access datasets from multiple disciplines to create clean, easy to navigate visualizations for data-driven discovery within the fields of allergy and infectious diseases.", "contact": "If you have any questions about access or the registration process, please contact ", - "email": "pcdc_help@lists.uchicago.edu" + "email": "support@gen3.org" }, "footerLogos": [ { - "src": "/src/img/gen3.png", + "src": "/custom/sponsors/gitops-sponsors/gen3.png", "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons", - "height": 40 + "alt": "Gen3 Data Commons" }, { - "src": "/src/img/uchicago.png", - "href": "https://www.uchicago.edu/", - "alt": "The University of Chicago", - "height": 40 + "src": "/src/img/createdby.png", + "href": "https://ctds.uchicago.edu/", + "alt": "Center for Translational Data Science at the University of Chicago" } ] }, - "explorerConfig": [ - { - "id": 1, - "label": "data", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - }, - "consortium": { - "chartType": "bar", - "title": "Consortium" - } - }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "data_contributor_id", - "studies.study_id", - "studies.treatment_arm", - "sex", - "race", - "ethnicity", - "year_at_disease_phase", - "survival_characteristics.lkss_obfuscated", - "censor_status", - "age_at_censor_status", - "medical_histories.medical_history", - "medical_histories.medical_history_status", - "external_references.external_resource_name", - "biospecimen_status" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "histologies.histology_grade", - "histologies.histology_inpc", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.tumor_state", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.depth", - "tumor_assessments.tumor_size", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "tumor_assessments.necrosis", - "tumor_assessments.necrosis_pct", - "tumor_assessments.tumor_laterality", - "stagings.irs_group", - "stagings.tnm_finding", - "stagings.stage_system", - "stagings.stage", - "stagings.AB", - "stagings.E", - "stagings.S", - "disease_characteristics.mki", - "disease_characteristics.bulk_disease", - "disease_characteristics.BULK_MED_MASS", - "disease_characteristics.bulky_nodal_aggregate", - "disease_characteristics.who_aml", - "disease_characteristics.CNS_disease_status", - "disease_characteristics.MLDS" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - 
"molecular_analysis.gene1", - "molecular_analysis.gene2", - "molecular_analysis.dna_index", - "molecular_analysis.age_at_molecular_analysis", - "molecular_analysis.mitoses", - "molecular_analysis.cytodifferentiation" - ] - }, - { - "title": "Surgery", - "fields": [ - "biopsy_surgical_procedures.tumor_classification", - "biopsy_surgical_procedures.procedure_type", - "biopsy_surgical_procedures.margins" - ] - }, - { - "title": "Radiation", - "fields": [ - "radiation_therapies.tumor_classification", - "radiation_therapies.energy_type", - "radiation_therapies.rt_dose" - ] - }, - { - "title": "Response", - "fields": [ - "subject_responses.tx_prior_response", - "subject_responses.response", - "subject_responses.interim_response", - "subject_responses.response_method", - "minimal_residual_diseases.mrd_result", - "minimal_residual_diseases.mrd_result_numeric" - ] - }, - { - "title": "SMN", - "fields": [ - "secondary_malignant_neoplasm.age_at_smn", - "secondary_malignant_neoplasm.smn_site", - "secondary_malignant_neoplasm.smn_type", - "secondary_malignant_neoplasm.smn_yn", - "secondary_malignant_neoplasm.smn_morph_icdo" - ] - }, - { - "title": "Imaging", - "fields": [ - "imagings.imaging_method", - "imagings.imaging_result" - ] - }, - { - "title": "Labs", - "fields": [ - "labs.lab_test", - "labs.lab_result", - "labs.lab_result_numeric", - "labs.lab_result_unit" - ] - }, - { - "title": "SCT", - "fields": [ - "stem_cell_transplants.sct_type", - "stem_cell_transplants.sct_source", - "stem_cell_transplants.sct_donor_relationship" - ] - } + "requiredCerts": [], + "featureFlags": { + "explorer": true, + "analysis": true + }, + "datasetBrowserConfig": { + "filterSections": [ + { + "title": "Supported Data Resources", + "options": [ + { "text": "TB", "filterType": "singleSelect"}, + { "text": "AIDS", "filterType": "singleSelect"}, + { "text": "Flu", "filterType": "singleSelect"}, + { "text": "Microbiome", "filterType": "singleSelect"} ] }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": false, - "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" + { + "title": "Research Focus", + "options": [ + { "text": "AIDS", "filterType": "singleSelect"}, + { "text": "TB", "filterType": "singleSelect"}, + { "text": "Flu", "filterType": "singleSelect"}, + { "text": "Immune Response", "filterType": "singleSelect"}, + { "text": "Immune Phenotype", "filterType": "singleSelect"}, + { "text": "Allergy", "filterType": "singleSelect"}, + { "text": "Atopy", "filterType": "singleSelect"}, + { "text": "Infection Response", "filterType": "singleSelect"}, + { "text": "Vaccine Response", "filterType": "singleSelect"}, + { "text": "Transplantation", "filterType": "singleSelect"}, + { "text": "Oncology", "filterType": "singleSelect"}, + { "text": "Autoimmune", "filterType": "singleSelect"}, + { "text": "Preterm Birth", "filterType": "singleSelect"} ] + } + ], + "fieldMapping" : [ + { "field": "link", "name": "View" }, + { "field": "dataset_name", 
"name": "Study" }, + { "field": "supported_data_resource", "name": "Supported Data Resource" }, + { "field": "research_focus", "name": "Research Focus" }, + { "field": "description", "name": "Description of Dataset" } + ], + "filterConfig": { + "tabs": [{ + "title": "Filters", + "fields": ["supported_data_resource", "research_focus"] + }] + } + }, + "dataExplorerConfig": { + "charts": { + "project_id": { + "chartType": "count", + "title": "Projects" }, - "patientIds": { - "filter": false, - "export": true + "subject_id": { + "chartType": "count", + "title": "Subjects" }, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } + "dataset": { + "chartType": "pie", + "title": "Resources", + "chartRow": 0 + }, + "data_format": { + "chartType": "bar", + "title": "Data Format", + "chartRow": 0 + }, + "data_type": { + "chartType": "pie", + "title": "Data Type", + "chartRow": 0 + }, + "experimental_strategies": { + "chartType": "bar", + "title": "Experimental Strategies", + "chartRow": 0 }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "data_contributor_id", - "name": "Data Contributor", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.study_id", - "name": "Study Id", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.treatment_arm", - "name": "Treatment Arm", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "year_at_disease_phase", - "name": "Year at Initial Diagnosis" - }, - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "survival_characteristics.lkss_obfuscated", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "medical_histories.medical_history", - "name": "Medical History" - }, - { - "field": "medical_histories.medical_history_status", - "name": "Medical History Status" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "biospecimen_status", - "name": "Biospecimen" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "INPC Classification" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_state", - "name": "Tumor State" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.depth", - "name": "Tumor Depth" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": "tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": 
"tumor_assessments.necrosis", - "name": "Necrosis" - }, - { - "field": "tumor_assessments.necrosis_pct", - "name": "Necrosis PCT" - }, - { - "field": "tumor_assessments.tumor_laterality", - "name": "Tumor Laterality" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - }, - { - "field": "stagings.stage_system", - "name": "Stage System" - }, - { - "field": "stagings.stage", - "name": "Stage" - }, - { - "field": "stagings.AB", - "name": "Ann Arbor AB" - }, - { - "field": "stagings.E", - "name": "Ann Arbor E" - }, - { - "field": "stagings.S", - "name": "Ann Arbor S" - }, - { - "field": "disease_characteristics.mki", - "name": "MKI" - }, - { - "field": "disease_characteristics.bulk_disease", - "name": "Bulky Disease" - }, - { - "field": "disease_characteristics.BULK_MED_MASS", - "name": "Bulky Mediastinal Mass" - }, - { - "field": "disease_characteristics.bulky_nodal_aggregate", - "name": "Bulky Nodal Aggregate" - }, - { - "field": "disease_characteristics.who_aml", - "name": "WHO AML" - }, - { - "field": "disease_characteristics.CNS_disease_status", - "name": "CNS Disease Status" - }, - { - "field": "disease_characteristics.MLDS", - "name": "MLDS" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "molecular_analysis.dna_index", - "name": "DNA Index" - }, - { - "field": "molecular_analysis.age_at_molecular_analysis", - "name": "Age at Molecular Analysis" - }, - { - "field": "molecular_analysis.mitoses", - "name": "Mitoses" - }, - { - "field": "molecular_analysis.cytodifferentiation", - "name": "Cytodifferentiation" - }, - { - "field": "biopsy_surgical_procedures.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "biopsy_surgical_procedures.procedure_type", - "name": "Procedure Type" - }, - { - "field": "biopsy_surgical_procedures.procedure_site", - "name": "Procedure Site" - }, - { - "field": "biopsy_surgical_procedures.margins", - "name": "Margins" - }, - { - "field": "radiation_therapies.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "radiation_therapies.age_at_rt_start", - "name": "Age at Radiation Therapy" - }, - { - "field": "radiation_therapies.rt_site", - "name": "Radiation Site" - }, - { - "field": "radiation_therapies.energy_type", - "name": "Energy Type" - }, - { - "field": "radiation_therapies.rt_dose", - "name": "Radiation Dose" - }, - { - "field": "radiation_therapies.rt_unit", - "name": "Radiation Unit" - }, - { - "field": "subject_responses.age_at_response", - "name": "Age at Response" - }, - { - "field": "subject_responses.tx_prior_response", - "name": "Treatment Prior Response" - }, - { - "field": "subject_responses.response", - "name": "Response" - }, - { - "field": "subject_responses.interim_response", - "name": "Interim Response" - }, - { - "field": "subject_responses.response_method", - "name": "Response Method" - }, - { - "field": "minimal_residual_diseases.mrd_result", - "name": "MRD Result" - }, - { - "field": "minimal_residual_diseases.mrd_result_numeric", - "name": 
"MRD Result Numeric" - }, - { - "field": "subject_responses.necrosis", - "name": "Necrosis" - }, - { - "field": "secondary_malignant_neoplasm.age_at_smn", - "name": "Age at SMN" - }, - { - "field": "secondary_malignant_neoplasm.smn_site", - "name": "SMN Site" - }, - { - "field": "secondary_malignant_neoplasm.smn_type", - "name": "SMN Type" - }, - { - "field": "secondary_malignant_neoplasm.smn_yn", - "name": "Secondary Malignancy" - }, - { - "field": "secondary_malignant_neoplasm.smn_morph_icdo", - "name": "ICD-O Morphology" - }, - { - "field": "imagings.imaging_method", - "name": "Imaging Method" - }, - { - "field": "imagings.imaging_result", - "name": "Imaging Result" - }, - { - "field": "labs.lab_result_numeric", - "name": "Numeric Lab Result" - }, - { - "field": "labs.lab_result_unit", - "name": "Lab Result Unit" - }, - { - "field": "labs.lab_result", - "name": "Lab Result" - }, - { - "field": "labs.lab_test", - "name": "Lab Test" - }, - { - "field": "stem_cell_transplants.sct_type", - "name": "SCT Type" - }, - { - "field": "stem_cell_transplants.sct_source", - "name": "SCT Source" - }, - { - "field": "stem_cell_transplants.sct_donor_relationship", - "name": "SCT Donor Relationship" - } + "species": { + "chartType": "bar", + "title": "Genus species", + "chartRow": 0 + }, + "gender": { + "chartType": "pie", + "title": "Gender", + "chartRow": 1 + }, + "race": { + "chartType": "pie", + "title": "Race", + "chartRow": 1 + }, + "ethnicity": { + "chartType": "pie", + "title": "Ethnicity", + "chartRow": 1 + }, + "biospecimen_anatomic_site": { + "chartType": "pie", + "title": "Biospecimen Anatomic Site", + "chartRow": 1 + } + }, + "fieldMapping" : [ + { "field": "dataset", "name": "Resource" }, + { "field": "studyAccession", "name": "Study" }, + { "field": "phenotype", "name": "Phenotype" }, + { "field": "gender", "name": "Gender" }, + { "field": "ethnicity", "name": "Ethnicity" }, + { "field": "strain", "name": "Strain" }, + { "field": "species", "name": "Genus species" }, + { "field": "submitter_id", "name": "Submitter ID" }, + { "field": "race", "name": "Race" }, + { "field": "hiv_status", "name": "HIV Status" }, + { "field": "study_submitter_id", "name": "Study"}, + { "field": "frstdthd", "name": "Year of Death" }, + { "field": "arthxbase", "name": "ART Use Prior to Baseline"}, + { "field": "bshbvstat", "name": "Baseline HBV Sero-status"}, + { "field": "bshcvstat", "name": "Baseline HCV Sero-status"}, + { "field": "cd4nadir", "name": "CD4 Nadir Prior to HAART"}, + { "field": "status", "name": "Summarized HIV Sero-status"}, + {"field": "project_id", "name": "Project ID"}, + {"field": "frstcncrd", "name": "First Confirmed Cancer Year"}, + {"field": "frstdmd", "name": "First Visit Year with Diabetes"}, + {"field": "frstdmmd", "name": "First Visit Year with All Necessary Components to Determine Diabetes"}, + {"field": "frsthtnd", "name": "First Visit Year with Hypertension"}, + {"field": "frsthtnmd", "name": "First Visit Year with All Necessary Components to Determine Hypertension"}, + {"field": "fcd4lowd", "name": "First Year Seen CD4N < 200 or CD4% < 14"}, + {"field": "fposdate", "name": "First Year Seen Seropositive"}, + {"field": "frstaidd", "name": "First Reported AIDS Year"}, + {"field": "lastafrd", "name": "Last Reported AIDS Free Year"}, + {"field": "lastcond", "name": "Year of Last Study Visit Attended"}, + {"field": "lastcontact", "name": "Last Year of Contact"}, + {"field": "lcd4higd", "name": "Last Year Seen with CD4N >= 200 and CD4% >= 14"}, + {"field": "lnegdate", "name": "Last 
Year Seen Seronegative"}, + {"field": "amikacin_res_phenotype", "name": "Amikacin Phenotype" }, + {"field": "capreomycin_res_phenotype", "name": "Capreomycin Phenotype" }, + {"field": "isoniazid_res_phenotype", "name": "Isoniazid Phenotype" }, + {"field": "kanamycin_res_phenotype", "name": "Kanamycin Phenotype" }, + {"field": "ofloxacin_res_phenotype", "name": "Ofloxacin Phenotype" }, + {"field": "pyrazinamide_res_phenotype", "name": "Pyrazinamide Phenotype" }, + {"field": "rifampicin_res_phenotype", "name": "Rifampicin Phenotype" }, + {"field": "rifampin_res_phenotype", "name": "Rifampin Phenotype" }, + {"field": "streptomycin_res_phenotype", "name": "streptomycin Phenotype" } + ], + "filterConfig": { + "tabs": [{ + "title": "Resource", + "fields": ["dataset", "data_format", "data_type"] + }, + { + "title": "Subject", + "fields": ["ethnicity", "gender", "species", "race"] + }, + { + "title": "Diagnosis", + "fields": [ + "arthxbase", + "bshbvstat", + "bshcvstat", + "cd4nadir", + "status", + "hiv_status" ] }, - "dataRequests": { - "enabled": false + { + "title": "Comorbidity", + "fields": [ + "frstcncrd", + "frstdmd", + "frstdmmd", + "frsthtnd", + "frsthtnmd" + ] + }, { + "title": "HIV History", + "fields": [ + "cd4nadir", + "fcd4lowd", + "fposdate", + "frstaidd", + "lastafrd", + "lastcond", + "lastcontact", + "lcd4higd", + "lnegdate", + "status" + ] }, - "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + { + "title": "Drug Resistance", + "fields": [ + "amikacin_res_phenotype", + "capreomycin_res_phenotype", + "isoniazid_res_phenotype", + "kanamycin_res_phenotype", + "ofloxacin_res_phenotype", + "pyrazinamide_res_phenotype", + "rifampicin_res_phenotype", + "rifampin_res_phenotype", + "streptomycin_res_phenotype" + ] + }, + { + "title": "Experiment", + "fields": [ + "experimental_strategies", + "virus_type", + "virus_subtype", + "analyte_type", + "biospecimen_anatomic_site", + "cell_line", + "sample_type", + "composition", + "strain" + ] + }] } - ] + } } \ No newline at end of file diff --git a/helm/portal/values.yaml b/helm/portal/values.yaml index 99955bc33..b30032ae1 100644 --- a/helm/portal/values.yaml +++ b/helm/portal/values.yaml @@ -220,7 +220,285 @@ gitops: # -- (string) gen3Bundle: "" # -- (string) multiline string - gitops.json - json: + json: | + { + "graphql": { + "boardCounts": [ + { + "graphql": "_case_count", + "name": "Case", + "plural": "Cases" + }, + { + "graphql": "_experiment_count", + "name": "Experiment", + "plural": "Experiments" + }, + { + "graphql": "_aliquot_count", + "name": "Aliquot", + "plural": "Aliquots" + } + ], + "chartCounts": [ + { + "graphql": "_case_count", + "name": "Case" + }, + { + "graphql": "_experiment_count", + "name": "Experiment" + }, + { + "graphql": "_aliquot_count", + "name": "Aliquot" + } + ], + "projectDetails": "boardCounts" + }, + "components": { + "appName": "Generic Data Commons Portal", + "index": { + "introduction": { + "heading": "Data Commons", + "text": "The Generic Data Commons supports the management, analysis and sharing of data for the research community.", + "link": "/submission" + }, + "buttons": [ + { + "name": "Define Data Field", + "icon": "data-field-define", + "body": "The Generic Data Commons define the data in a general way. 
Please study the dictionary before you start browsing.", + "link": "/DD", + "label": "Learn more" + }, + { + "name": "Explore Data", + "icon": "data-explore", + "body": "The Exploration Page gives you insights and a clear overview under selected factors.", + "link": "/explorer", + "label": "Explore data" + }, + { + "name": "Access Data", + "icon": "data-access", + "body": "Use our selected tool to filter out the data you need.", + "link": "/query", + "label": "Query data" + }, + { + "name": "Submit Data", + "icon": "data-submit", + "body": "Submit Data based on the dictionary.", + "link": "/submission", + "label": "Submit data" + } + ] + }, + "navigation": { + "title": "Generic Data Commons", + "items": [ + { + "icon": "dictionary", + "link": "/DD", + "color": "#a2a2a2", + "name": "Dictionary" + }, + { + "icon": "exploration", + "link": "/explorer", + "color": "#a2a2a2", + "name": "Exploration" + }, + { + "icon": "query", + "link": "/query", + "color": "#a2a2a2", + "name": "Query" + }, + { + "icon": "workspace", + "link": "/workspace", + "color": "#a2a2a2", + "name": "Workspace" + }, + { + "icon": "profile", + "link": "/identity", + "color": "#a2a2a2", + "name": "Profile" + } + ] + }, + "topBar": { + "items": [ + { + "icon": "upload", + "link": "/submission", + "name": "Submit Data" + }, + { + "link": "https://gen3.org/resources/user", + "name": "Documentation" + } + ] + }, + "login": { + "title": "Generic Data Commons", + "subTitle": "Explore, Analyze, and Share Data", + "text": "This website supports the management, analysis and sharing of human disease data for the research community and aims to advance basic understanding of the genetic basis of complex traits and accelerate discovery and development of therapies, diagnostic tests, and other technologies for diseases like cancer.", + "contact": "If you have any questions about access or the registration process, please contact ", + "email": "support@gen3.org" + }, + "certs": {}, + "footerLogos": [ + { + "src": "/src/img/gen3.png", + "href": "https://ctds.uchicago.edu/gen3", + "alt": "Gen3 Data Commons" + }, + { + "src": "/src/img/createdby.png", + "href": "https://ctds.uchicago.edu/", + "alt": "Center for Translational Data Science at the University of Chicago" + } + ] + }, + "requiredCerts": [], + "featureFlags": { + "explorer": true, + "noIndex": true, + "analysis": false, + "discovery": false, + "discoveryUseAggMDS": false, + "studyRegistration": false + }, + "explorerConfig": [ + { + "tabTitle": "Data", + "charts": { + "project_id": { + "chartType": "count", + "title": "Projects" + }, + "_case_id": { + "chartType": "count", + "title": "Cases" + }, + "gender": { + "chartType": "pie", + "title": "Gender" + }, + "race": { + "chartType": "bar", + "title": "Race" + } + }, + "filters": { + "tabs": [ + { + "title": "Case", + "fields":[ + "project_id", + "gender", + "race", + "ethnicity" + ] + } + ] + }, + "table": { + "enabled": false + }, + "dropdowns": {}, + "buttons": [ + { + "enabled": true, + "type": "export-to-workspace", + "title": "Export to Workspace", + "leftIcon": "datafile", + "rightIcon": "download" + } + ], + "guppyConfig": { + "dataType": "case", + "nodeCountTitle": "Cases", + "fieldMapping": [ + { "field": "disease_type", "name": "Disease type" }, + { "field": "primary_site", "name": "Site where samples were collected"} + ], + "manifestMapping": { + "resourceIndexType": "file", + "resourceIdField": "object_id", + "referenceIdFieldInResourceIndex": "_case_id", + "referenceIdFieldInDataIndex": "_case_id" + }, + 
"accessibleFieldCheckList": ["_case_id"], + "accessibleValidationField": "_case_id" + } + }, + { + "tabTitle": "File", + "charts": { + "data_type": { + "chartType": "stackedBar", + "title": "File Type" + }, + "data_format": { + "chartType": "stackedBar", + "title": "File Format" + } + }, + "filters": { + "tabs": [ + { + "title": "File", + "fields": [ + "project_id", + "data_type", + "data_format" + ] + } + ] + }, + "buttons": [ + { + "enabled": true, + "type": "export-files-to-workspace", + "title": "Export to Workspace", + "leftIcon": "datafile", + "rightIcon": "download" + } + ], + "table": { + "enabled": true, + "fields": [ + "project_id", + "file_name", + "file_size", + "object_id" + ] + }, + "dropdowns": {}, + "guppyConfig": { + "dataType": "file", + "fieldMapping": [ + { "field": "object_id", "name": "GUID" } + ], + "nodeCountTitle": "Files", + "manifestMapping": { + "resourceIndexType": "case", + "resourceIdField": "_case_id", + "referenceIdFieldInResourceIndex": "object_id", + "referenceIdFieldInDataIndex": "object_id" + }, + "accessibleFieldCheckList": ["_case_id"], + "accessibleValidationField": "_case_id", + "downloadAccessor": "object_id" + } + } + ] + } # -- (string) - favicon in base64 favicon: "AAABAAEAICAAAAEAIACoEAAAFgAAACgAAAAgAAAAQAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQv3IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1MiCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwKg0Nd6yqf+8pi7D3rKp/96yqf/esqn/3rKp/76qNMPEpU2QxbFJNwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7WfF3cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMWySQAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/TrIS0AAAAAL+nLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACxmAIAxrhKBregGtLesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/2MyPCLGaCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAs5kJANqvn0vesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/18l+GwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKuSAADq5L8H3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/z79qBca0SwAAAAAAAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+4oR3YAAAAAAAAAAAAAAAAAAAAAAAAAAC4oBlZ3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/AqC/N3rKp/96yqf+/rD3M3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+4oyBkAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+9qDAqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzb1oH96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/8qoYv8AAAAAAAAAALefHQC4oB5X3rKp/96yqf/esqn/AAAAAAAAAADm3bsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOHbrAAAAAAA6ePTEd6yqf/esqn/3rKp/8CsNngAAAAAAAAAAN6yqf/esqn/3rKp/////xIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADq4bwA08V3EN6yqf/esqn/3rKp/wAAAAAAAAAA3rKp/96yqf+6nyfZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/AAAAALyjJDbesqn/3rKp/7ihIc0AAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADFpE7l3rKp/96yqf/esqn/wq0+Wd6yqf/esqn/3rKp/wAAAADPwW4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC7pCAAAAAAAN6yqf/esqn/3rKp/8CsOVK6oyF63rKp/96yqf/esqn/uqQqxAAAAAC7oyQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtZ8WAAAAAADesqn/3rKp/96yqf/esqn/3rKp/7ukIHresqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/wK1BXN6yqf/esqn/3rKp/96yqf/esqn/uKAYUgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL+oO1Hesqn/3rKp/96yqf/esqn/3rKp/76pLXq3nx023rKp/96yqf/esqn/3rKp/96yqf/esqn/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAt58l896yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/xrRRVQAAAADYzYkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAM67agAAAAAAybZYUt6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/9+/UXAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAACznRMAtJ4ZV96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/ArDZ4AAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/yqdi/wAAAAAAAAAAAAAAAAAAAADHplZ93rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/6Ny8U+bauVDesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf+5oyBkAAAAAAAAAAAAAAAAAAAAAAAAAADesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/t6Ec1wAAAAAAAAAAAAAAAAAAAAAAAAAAs5sWAOHUlQfesqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/OxHUFxbRJAAAAAAAAAAAAAAAAAAAAAAAAAAAAsJkFAN6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/29COIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAr5YBAN6yqf+7pSf43rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/uaMf+d2xp6MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyrhUAAAAAAC7pil73rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7miH38AAAAAxrJDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADi150b2K6T4N6yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/96yqf/esqn/3rKp/7mjI5zUxHAaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOnftwAAAAAAAAAAAN6yqf/esqn/3rKp/7egG+e2nxf/uKAk/7mjIvPesqn/3rKp/7agGEAAAAAAAAAAANnOjAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA///////wD///gAP//gAAf/wAAD/4AAAf8AAAD+AAAAfgAAAHwA/wA8f//+OP///xj///8Y////CP///xh///4IP//8CD///Bgf//gID//wGAP/wBwB/4A8AP8APgAYAH4AAAB/AAAA/wAAAf+AAAH/8AAP//" # -- (string) - multiline string - gitops.css diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 0754df1e2..5d64d8e15 100644 --- a/pcdc-default-values.yaml 
+++ b/pcdc-default-values.yaml @@ -331,7 +331,673 @@ portal: requests: cpu: 1.0 gitops: - json: "" + json: | + { + "gaTrackingId": "undefined", + "graphql": { + "boardCounts": [ + { + "graphql": "_person_count", + "name": "Person", + "plural": "Persons" + }, + { + "graphql": "_subject_count", + "name": "Subject", + "plural": "Subjects" + } + ], + "chartCounts": [ + { + "graphql": "_person_count", + "name": "Person" + }, + { + "graphql": "_subject_count", + "name": "Subject" + } + ], + "projectDetails": "boardCounts" + }, + "components": { + "appName": "Pediatric Cancer Data Commons Portal", + "index": { + "introduction": { + "heading": "Pediatric Cancer Data Commons", + "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", + "link": "/submission" + }, + "buttons": [ + { + "name": "Define Data Field", + "icon": "data-field-define", + "body": "The Pediatric Cancer Data Commons defines the data. Please study the dictionary before you start browsing.", + "link": "/DD", + "label": "Learn more" + }, + { + "name": "Explore Data", + "icon": "data-explore", + "body": "The Exploration Page gives you insights and a clear overview under selected factors.", + "link": "/explorer", + "label": "Explore data" + } + ], + "barChart": { + "showPercentage": true + } + }, + "navigation": { + "items": [ + { + "icon": "dictionary", + "link": "/DD", + "color": "#a2a2a2", + "name": "Dictionary" + }, + { + "icon": "exploration", + "link": "/explorer", + "color": "#a2a2a2", + "name": "Exploration" + } + ] + }, + "topBar": { + "items": [ + { + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/pcdc/", + "name": "About PCDC" + }, + { + "icon": "external-link", + "leftOrientation": true, + "link": "https://commons.cri.uchicago.edu/sponsors/", + "name": "Our Sponsors" + } + ] + }, + "login": { + "title": "Pediatric Cancer Data Commons", + "subTitle": "Connect. Share. Cure.", + "text": "Welcome to the Pediatric Cancer Data Commons (PCDC), brought to you by Data for the Common Good (D4CG). Headquartered at the University of Chicago, D4CG works with international leaders to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources.\n\nThe PCDC harnesses pediatric, AYA, and adult cancer clinical data from around the world into a single unified platform, making it possible to explore and access data across multiple types of cancer. 
The PCDC Data Portal currently includes some of the world's largest sets of clinical data for pediatric neuroblastoma, soft tissue sarcoma, germ cell tumors, AML, and Hodgkin lymphoma, with the addition of more cancer types in progress.", + "contact": "If you have any questions about access or the registration process, please contact ", + "email": "pcdc_help@lists.uchicago.edu" + }, + "footerLogos": [ + { + "src": "/src/img/gen3.png", + "href": "https://ctds.uchicago.edu/gen3", + "alt": "Gen3 Data Commons", + "height": 40 + }, + { + "src": "/src/img/uchicago.png", + "href": "https://www.uchicago.edu/", + "alt": "The University of Chicago", + "height": 40 + } + ] + }, + "explorerConfig": [ + { + "id": 1, + "label": "data", + "charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + }, + "consortium": { + "chartType": "bar", + "title": "Consortium" + } + }, + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "data_contributor_id", + "studies.study_id", + "studies.treatment_arm", + "sex", + "race", + "ethnicity", + "year_at_disease_phase", + "survival_characteristics.lkss_obfuscated", + "censor_status", + "age_at_censor_status", + "medical_histories.medical_history", + "medical_histories.medical_history_status", + "external_references.external_resource_name", + "biospecimen_status" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "histologies.histology_grade", + "histologies.histology_inpc", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.tumor_state", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.depth", + "tumor_assessments.tumor_size", + "tumor_assessments.invasiveness", + "tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "tumor_assessments.necrosis", + "tumor_assessments.necrosis_pct", + "tumor_assessments.tumor_laterality", + "stagings.irs_group", + "stagings.tnm_finding", + "stagings.stage_system", + "stagings.stage", + "stagings.AB", + "stagings.E", + "stagings.S", + "disease_characteristics.mki", + "disease_characteristics.bulk_disease", + "disease_characteristics.BULK_MED_MASS", + "disease_characteristics.bulky_nodal_aggregate", + "disease_characteristics.who_aml", + "disease_characteristics.CNS_disease_status", + "disease_characteristics.MLDS" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2", + "molecular_analysis.dna_index", + "molecular_analysis.age_at_molecular_analysis", + "molecular_analysis.mitoses", + "molecular_analysis.cytodifferentiation" + ] + }, + { + "title": "Surgery", + "fields": [ + "biopsy_surgical_procedures.tumor_classification", + "biopsy_surgical_procedures.procedure_type", + "biopsy_surgical_procedures.margins" + ] + }, + { + "title": "Radiation", + "fields": [ + "radiation_therapies.tumor_classification", + "radiation_therapies.energy_type", + 
"radiation_therapies.rt_dose" + ] + }, + { + "title": "Response", + "fields": [ + "subject_responses.tx_prior_response", + "subject_responses.response", + "subject_responses.interim_response", + "subject_responses.response_method", + "minimal_residual_diseases.mrd_result", + "minimal_residual_diseases.mrd_result_numeric" + ] + }, + { + "title": "SMN", + "fields": [ + "secondary_malignant_neoplasm.age_at_smn", + "secondary_malignant_neoplasm.smn_site", + "secondary_malignant_neoplasm.smn_type", + "secondary_malignant_neoplasm.smn_yn", + "secondary_malignant_neoplasm.smn_morph_icdo" + ] + }, + { + "title": "Imaging", + "fields": [ + "imagings.imaging_method", + "imagings.imaging_result" + ] + }, + { + "title": "Labs", + "fields": [ + "labs.lab_test", + "labs.lab_result", + "labs.lab_result_numeric", + "labs.lab_result_unit" + ] + }, + { + "title": "SCT", + "fields": [ + "stem_cell_transplants.sct_type", + "stem_cell_transplants.sct_source", + "stem_cell_transplants.sct_donor_relationship" + ] + } + ] + }, + "projectId": "search", + "graphqlField": "subject", + "index": "", + "buttons": [ + { + "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data accessible to you" + } + ], + "table": { + "enabled": false, + "fields": [ + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + "patientIds": { + "filter": false, + "export": true + }, + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": true, + "survival": true + } + }, + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "data_contributor_id", + "name": "Data Contributor", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.study_id", + "name": "Study Id", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." + }, + { + "field": "studies.treatment_arm", + "name": "Treatment Arm", + "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." 
+ }, + { + "field": "year_at_disease_phase", + "name": "Year at Initial Diagnosis" + }, + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "survival_characteristics.lkss_obfuscated", + "name": "Last Known Survival Status (LKSS)" + }, + { + "field": "medical_histories.medical_history", + "name": "Medical History" + }, + { + "field": "medical_histories.medical_history_status", + "name": "Medical History Status" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "biospecimen_status", + "name": "Biospecimen" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + { + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": "INPC Classification" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_state", + "name": "Tumor State" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diameter Dimension 1" + }, + { + "field": "tumor_assessments.depth", + "name": "Tumor Depth" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "tumor_assessments.necrosis", + "name": "Necrosis" + }, + { + "field": "tumor_assessments.necrosis_pct", + "name": "Necrosis PCT" + }, + { + "field": "tumor_assessments.tumor_laterality", + "name": "Tumor Laterality" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + }, + { + "field": "stagings.stage_system", + "name": "Stage System" + }, + { + "field": "stagings.stage", + "name": "Stage" + }, + { + "field": "stagings.AB", + "name": "Ann Arbor AB" + }, + { + "field": "stagings.E", + "name": "Ann Arbor E" + }, + { + "field": "stagings.S", + "name": "Ann Arbor S" + }, + { + "field": "disease_characteristics.mki", + "name": "MKI" + }, + { + "field": "disease_characteristics.bulk_disease", + "name": "Bulky Disease" + }, + { + "field": "disease_characteristics.BULK_MED_MASS", + "name": "Bulky Mediastinal Mass" + }, + { + "field": "disease_characteristics.bulky_nodal_aggregate", + "name": "Bulky Nodal Aggregate" + }, + { + "field": "disease_characteristics.who_aml", + "name": "WHO AML" + }, + { + "field": "disease_characteristics.CNS_disease_status", + "name": "CNS Disease Status" + }, + { + "field": "disease_characteristics.MLDS", + "name": "MLDS" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": 
"molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "molecular_analysis.dna_index", + "name": "DNA Index" + }, + { + "field": "molecular_analysis.age_at_molecular_analysis", + "name": "Age at Molecular Analysis" + }, + { + "field": "molecular_analysis.mitoses", + "name": "Mitoses" + }, + { + "field": "molecular_analysis.cytodifferentiation", + "name": "Cytodifferentiation" + }, + { + "field": "biopsy_surgical_procedures.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "biopsy_surgical_procedures.procedure_type", + "name": "Procedure Type" + }, + { + "field": "biopsy_surgical_procedures.procedure_site", + "name": "Procedure Site" + }, + { + "field": "biopsy_surgical_procedures.margins", + "name": "Margins" + }, + { + "field": "radiation_therapies.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "radiation_therapies.age_at_rt_start", + "name": "Age at Radiation Therapy" + }, + { + "field": "radiation_therapies.rt_site", + "name": "Radiation Site" + }, + { + "field": "radiation_therapies.energy_type", + "name": "Energy Type" + }, + { + "field": "radiation_therapies.rt_dose", + "name": "Radiation Dose" + }, + { + "field": "radiation_therapies.rt_unit", + "name": "Radiation Unit" + }, + { + "field": "subject_responses.age_at_response", + "name": "Age at Response" + }, + { + "field": "subject_responses.tx_prior_response", + "name": "Treatment Prior Response" + }, + { + "field": "subject_responses.response", + "name": "Response" + }, + { + "field": "subject_responses.interim_response", + "name": "Interim Response" + }, + { + "field": "subject_responses.response_method", + "name": "Response Method" + }, + { + "field": "minimal_residual_diseases.mrd_result", + "name": "MRD Result" + }, + { + "field": "minimal_residual_diseases.mrd_result_numeric", + "name": "MRD Result Numeric" + }, + { + "field": "subject_responses.necrosis", + "name": "Necrosis" + }, + { + "field": "secondary_malignant_neoplasm.age_at_smn", + "name": "Age at SMN" + }, + { + "field": "secondary_malignant_neoplasm.smn_site", + "name": "SMN Site" + }, + { + "field": "secondary_malignant_neoplasm.smn_type", + "name": "SMN Type" + }, + { + "field": "secondary_malignant_neoplasm.smn_yn", + "name": "Secondary Malignancy" + }, + { + "field": "secondary_malignant_neoplasm.smn_morph_icdo", + "name": "ICD-O Morphology" + }, + { + "field": "imagings.imaging_method", + "name": "Imaging Method" + }, + { + "field": "imagings.imaging_result", + "name": "Imaging Result" + }, + { + "field": "labs.lab_result_numeric", + "name": "Numeric Lab Result" + }, + { + "field": "labs.lab_result_unit", + "name": "Lab Result Unit" + }, + { + "field": "labs.lab_result", + "name": "Lab Result" + }, + { + "field": "labs.lab_test", + "name": "Lab Test" + }, + { + "field": "stem_cell_transplants.sct_type", + "name": "SCT Type" + }, + { + "field": "stem_cell_transplants.sct_source", + "name": "SCT Source" + }, + { + "field": "stem_cell_transplants.sct_donor_relationship", + "name": "SCT Donor Relationship" + } + ] + }, + "dataRequests": { + "enabled": false + }, + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + } + ] + } revproxy: image: From 30a17211035e312f3236ba7581c21bbc58c950c5 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Jul 2025 16:48:05 -0700 Subject: [PATCH 085/126] Refactor Peregrine secret/config handling and volume mounts Removed the deprecated peregrine-creds secret and its template, consolidating configuration and credentials into 
peregrine-secret. Updated settings.py to use environment variables with fallbacks to creds.json, and simplified config_helper.py by removing unused legacy code. Adjusted deployment.yaml and values.yaml to use a single config-volume and support custom volumeMounts via Helm values. --- .../peregrine-secret/config_helper.py | 348 +----------------- helm/peregrine/peregrine-secret/settings.py | 85 ++--- helm/peregrine/templates/deployment.yaml | 11 +- helm/peregrine/templates/peregrine-creds.yaml | 19 - helm/peregrine/values.yaml | 18 +- .../sheepdog/sheepdog-secret/config_helper.py | 346 +---------------- pcdc-default-values.yaml | 2 +- 7 files changed, 78 insertions(+), 751 deletions(-) delete mode 100644 helm/peregrine/templates/peregrine-creds.yaml diff --git a/helm/peregrine/peregrine-secret/config_helper.py b/helm/peregrine/peregrine-secret/config_helper.py index 6b303beac..6bf5f592c 100644 --- a/helm/peregrine/peregrine-secret/config_helper.py +++ b/helm/peregrine/peregrine-secret/config_helper.py @@ -1,9 +1,17 @@ +""" +Originally copied from `cloud-automation/apis_configs/config_helper.py` +(renamed `confighelper.py` so it isn't overwritten by the file that cloud-automation +still mounts for backwards compatibility). + +TODO: once everyone has this independent version of sheepdog, remove `wsgi.py` and +`config_helper.py` here: +https://github.com/uc-cdis/cloud-automation/blob/afb750d/kube/services/peregrine/peregrine-deploy.yaml#L159-L170 +and update this: +https://github.com/uc-cdis/cloud-automation/blob/afb750d752f1324c2884da1efaef3cec8f9476b9/gen3/bin/kube-setup-peregrine.sh#L16 +""" + import json import os -import copy -import argparse -import re -import types # # make it easy to change this for testing @@ -43,334 +51,6 @@ def load_json(file_name, app_name, search_folders=None): """ actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: - return None + return {} with open(actual_files[0], "r") as reader: - return json.load(reader) - - -def inject_creds_into_fence_config(creds_file_path, config_file_path): - creds_file = open(creds_file_path, "r") - creds = json.load(creds_file) - creds_file.close() - - # get secret values from creds.json file - db_host = _get_nested_value(creds, "db_host") - db_username = _get_nested_value(creds, "db_username") - db_password = _get_nested_value(creds, "db_password") - db_database = _get_nested_value(creds, "db_database") - hostname = _get_nested_value(creds, "hostname") - indexd_password = _get_nested_value(creds, "indexd_password") - google_client_secret = _get_nested_value(creds, "google_client_secret") - google_client_id = _get_nested_value(creds, "google_client_id") - hmac_key = _get_nested_value(creds, "hmac_key") - db_path = "postgresql://{}:{}@{}:5432/{}".format( - db_username, db_password, db_host, db_database - ) - - config_file = open(config_file_path, "r").read() - - print(" DB injected with value(s) from creds.json") - config_file = _replace(config_file, "DB", db_path) - - print(" BASE_URL injected with value(s) from creds.json") - config_file = _replace(config_file, "BASE_URL", "https://{}/user".format(hostname)) - - print(" INDEXD_PASSWORD injected with value(s) from creds.json") - config_file = _replace(config_file, "INDEXD_PASSWORD", indexd_password) - config_file = _replace(config_file, "INDEXD_USERNAME", "fence") - - print(" ENCRYPTION_KEY injected with value(s) from creds.json") - config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) - - print( - " OPENID_CONNECT/google/client_secret 
injected with value(s) " - "from creds.json" - ) - config_file = _replace( - config_file, "OPENID_CONNECT/google/client_secret", google_client_secret - ) - - print(" OPENID_CONNECT/google/client_id injected with value(s) from creds.json") - config_file = _replace( - config_file, "OPENID_CONNECT/google/client_id", google_client_id - ) - - open(config_file_path, "w+").write(config_file) - - -def set_prod_defaults(config_file_path): - config_file = open(config_file_path, "r").read() - - print( - " CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS set as " - "var/www/fence/fence_google_app_creds_secret.json" - ) - config_file = _replace( - config_file, - "CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS", - "/var/www/fence/fence_google_app_creds_secret.json", - ) - - print( - " CIRRUS_CFG/GOOGLE_STORAGE_CREDS set as " - "var/www/fence/fence_google_storage_creds_secret.json" - ) - config_file = _replace( - config_file, - "CIRRUS_CFG/GOOGLE_STORAGE_CREDS", - "/var/www/fence/fence_google_storage_creds_secret.json", - ) - - print(" INDEXD set as http://indexd-service/") - config_file = _replace(config_file, "INDEXD", "http://indexd-service/") - - print(" ARBORIST set as http://arborist-service/") - config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") - - print(" HTTP_PROXY/host set as cloud-proxy.internal.io") - config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") - - print(" HTTP_PROXY/port set as 3128") - config_file = _replace(config_file, "HTTP_PROXY/port", 3128) - - print(" DEBUG set to false") - config_file = _replace(config_file, "DEBUG", False) - - print(" MOCK_AUTH set to false") - config_file = _replace(config_file, "MOCK_AUTH", False) - - print(" MOCK_GOOGLE_AUTH set to false") - config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) - - print(" AUTHLIB_INSECURE_TRANSPORT set to true") - config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) - - print(" SESSION_COOKIE_SECURE set to true") - config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) - - print(" ENABLE_CSRF_PROTECTION set to true") - config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) - - open(config_file_path, "w+").write(config_file) - - -def inject_other_files_into_fence_config(other_files, config_file_path): - additional_cfgs = _get_all_additional_configs(other_files) - - config_file = open(config_file_path, "r").read() - - for key, value in additional_cfgs.iteritems(): - print(" {} set to {}".format(key, value)) - config_file = _nested_replace(config_file, key, value) - - open(config_file_path, "w+").write(config_file) - - -def _get_all_additional_configs(other_files): - """ - Attempt to parse given list of files and extract configuration variables and values - """ - additional_configs = dict() - for file_path in other_files: - try: - file_ext = file_path.strip().split(".")[-1] - if file_ext == "json": - json_file = open(file_path, "r") - configs = json.load(json_file) - json_file.close() - elif file_ext == "py": - configs = from_pyfile(file_path) - else: - print( - "Cannot load config vars from a file with extention: {}".format( - file_ext - ) - ) - except Exception as exc: - # if there's any issue reading the file, exit - print( - "Error reading {}. Cannot get configuration. Skipping this file. 
" - "Details: {}".format(other_files, str(exc)) - ) - continue - - if configs: - additional_configs.update(configs) - - return additional_configs - - -def _nested_replace(config_file, key, value, replacement_path=None): - replacement_path = replacement_path or key - try: - for inner_key, inner_value in value.iteritems(): - temp_path = replacement_path - temp_path = temp_path + "/" + inner_key - config_file = _nested_replace( - config_file, inner_key, inner_value, temp_path - ) - except AttributeError: - # not a dict so replace - if value is not None: - config_file = _replace(config_file, replacement_path, value) - - return config_file - - -def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0): - """ - Replace a nested value in a YAML file string with the given value without - losing comments. Uses a regex to do the replacement. - - Args: - yaml_config (str): a string representing a full configuration file - path_to_key (str): nested/path/to/key. The value of this key will be - replaced - replacement_value (str): Replacement value for the key from - path_to_key - """ - nested_path_to_replace = path_to_key.split("/") - - # our regex looks for a specific number of spaces to ensure correct - # level of nesting. It matches to the end of the line - search_string = ( - " " * nested_level + ".*" + nested_path_to_replace[0] + "(')?(\")?:.*\n" - ) - matches = re.search(search_string, yaml_config[start:]) - - # early return if we haven't found anything - if not matches: - return yaml_config - - # if we're on the last item in the path, we need to get the value and - # replace it in the original file - if len(nested_path_to_replace) == 1: - # replace the current key:value with the new replacement value - match_start = start + matches.start(0) + len(" " * nested_level) - match_end = start + matches.end(0) - yaml_config = ( - yaml_config[:match_start] - + "{}: {}\n".format( - nested_path_to_replace[0], - _get_yaml_replacement_value(replacement_value, nested_level), - ) - + yaml_config[match_end:] - ) - - return yaml_config - - # set new start point to past current match and move on to next match - start = matches.end(0) - nested_level += 1 - del nested_path_to_replace[0] - - return _replace( - yaml_config, - "/".join(nested_path_to_replace), - replacement_value, - start, - nested_level, - ) - - -def from_pyfile(filename, silent=False): - """ - Modeled after flask's ability to load in python files: - https://github.com/pallets/flask/blob/master/flask/config.py - - Some alterations were made but logic is essentially the same - """ - filename = os.path.abspath(filename) - d = types.ModuleType("config") - d.__file__ = filename - try: - with open(filename, mode="rb") as config_file: - exec(compile(config_file.read(), filename, "exec"), d.__dict__) - except IOError as e: - print("Unable to load configuration file ({})".format(e.strerror)) - if silent: - return False - raise - return _from_object(d) - - -def _from_object(obj): - configs = {} - for key in dir(obj): - if key.isupper(): - configs[key] = getattr(obj, key) - return configs - - -def _get_yaml_replacement_value(value, nested_level=0): - if isinstance(value, str): - return "'" + value + "'" - elif isinstance(value, bool): - return str(value).lower() - elif isinstance(value, list) or isinstance(value, set): - output = "" - for item in value: - # spaces for nested level then spaces and hyphen for each list item - output += ( - "\n" - + " " * nested_level - + " - " - + _get_yaml_replacement_value(item) - + "" - ) - return 
output - else: - return value - - -def _get_nested_value(dictionary, nested_path): - """ - Return a value from a dictionary given a path-like nesting of keys. - - Will default to an empty string if value cannot be found. - - Args: - dictionary (dict): a dictionary - nested_path (str): nested/path/to/key - - Returns: - ?: Value from dict - """ - replacement_value_path = nested_path.split("/") - replacement_value = copy.deepcopy(dictionary) - - for item in replacement_value_path: - replacement_value = replacement_value.get(item, {}) - - if replacement_value == {}: - replacement_value = "" - - return replacement_value - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "-i", - "--creds_file_to_inject", - default="creds.json", - help="creds file to inject into the configuration yaml", - ) - parser.add_argument( - "--other_files_to_inject", - nargs="+", - help="fence_credentials.json, local_settings.py, fence_settings.py file(s) to " - "inject into the configuration yaml", - ) - parser.add_argument( - "-c", "--config_file", default="config.yaml", help="configuration yaml" - ) - args = parser.parse_args() - - inject_creds_into_fence_config(args.creds_file_to_inject, args.config_file) - set_prod_defaults(args.config_file) - - if args.other_files_to_inject: - inject_other_files_into_fence_config( - args.other_files_to_inject, args.config_file - ) + return json.load(reader) \ No newline at end of file diff --git a/helm/peregrine/peregrine-secret/settings.py b/helm/peregrine/peregrine-secret/settings.py index bfa26abb8..9288629cc 100644 --- a/helm/peregrine/peregrine-secret/settings.py +++ b/helm/peregrine/peregrine-secret/settings.py @@ -1,6 +1,6 @@ from peregrine.api import app, app_init from os import environ -import confighelper +import bin.confighelper as confighelper APP_NAME = "peregrine" @@ -12,71 +12,62 @@ def load_json(file_name): conf_data = load_json("creds.json") config = app.config -config["AUTH"] = "https://auth.service.consul:5000/v3/" -config["AUTH_ADMIN_CREDS"] = None -config["INTERNAL_AUTH"] = None # ARBORIST deprecated, replaced by ARBORIST_URL # ARBORIST_URL is initialized in app_init() directly config["ARBORIST"] = "http://arborist-service/" -# Signpost: deprecated, replaced by index client. 
-config["SIGNPOST"] = { - "host": environ.get("SIGNPOST_HOST") or "http://indexd-service", - "version": "v0", - "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), -} + config["INDEX_CLIENT"] = { "host": environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", "version": "v0", - "auth": ("gdcapi", conf_data.get("indexd_password", "{{indexd_password}}")), + # The user should be "sheepdog", but for legacy reasons, we use "gdcapi" instead + "auth": ( + ( + environ.get("INDEXD_USER", "gdcapi"), + environ.get("INDEXD_PASS") + or conf_data.get("indexd_password", "{{indexd_password}}"), + ) + ), } -config["FAKE_AUTH"] = False + config["PSQLGRAPH"] = { - "host": environ.get("PGHOST"), - "user": environ.get("PGUSER"), - "password": environ.get("PGPASSWORD"), - "database": environ.get("PGDB"), + "host": environ.get("PGHOST") or conf_data.get("db_host", "{{db_host}}"), + "user": environ.get("PGUSER") or conf_data.get("db_username", "{{db_username}}"), + "password": environ.get("PGPASSWORD") + or conf_data.get("db_password", "{{db_password}}"), + "database": environ.get("PGDB") or conf_data.get("db_database", "{{db_database}}"), } -config["HMAC_ENCRYPTION_KEY"] = conf_data.get("hmac_key", "{{hmac_key}}") -config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") -config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (environ.get("FENCE_DB_USER"), environ.get("FENCE_DB_PASS"), environ.get("FENCE_DB_HOST"), environ.get("FENCE_DB_DBNAME")) +fence_username = environ.get("FENCE_DB_USER") or conf_data.get( + "fence_username", "{{fence_username}}" +) +fence_password = environ.get("FENCE_DB_PASS") or conf_data.get( + "fence_password", "{{fence_password}}" +) +fence_host = environ.get("FENCE_DB_HOST") or conf_data.get( + "fence_host", "{{fence_host}}" +) +fence_database = environ.get("FENCE_DB_DBNAME") or conf_data.get( + "fence_database", "{{fence_database}}" +) +config["PSQL_USER_DB_CONNECTION"] = "postgresql://%s:%s@%s:5432/%s" % ( + fence_username, + fence_password, + fence_host, + fence_database, +) + config["DICTIONARY_URL"] = environ.get( "DICTIONARY_URL", "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json", ) -config["SUBMISSION"] = {"bucket": conf_data.get("bagit_bucket", "{{bagit_bucket}}")} - -config["STORAGE"] = { - "s3": { - "access_key": conf_data.get("s3_access", "{{s3_access}}"), - "secret_key": conf_data.get("s3_secret", "{{s3_secret}}"), - } -} - -config["OIDC_ISSUER"] = "https://%s/user" % conf_data["hostname"] - -config["OAUTH2"] = { - "client_id": conf_data.get("oauth2_client_id", "{{oauth2_client_id}}"), - "client_secret": conf_data.get("oauth2_client_secret", "{{oauth2_client_secret}}"), - "api_base_url": "https://%s/user/" % conf_data["hostname"], - "authorize_url": "https://%s/user/oauth2/authorize" % conf_data["hostname"], - "access_token_url": "https://%s/user/oauth2/token" % conf_data["hostname"], - "refresh_token_url": "https://%s/user/oauth2/token" % conf_data["hostname"], - "client_kwargs": { - "redirect_uri": "https://%s/api/v0/oauth2/authorize" % conf_data["hostname"], - "scope": "openid data user", - }, - # deprecated key values, should be removed after all commons use new oidc - "internal_oauth_provider": "http://fence-service/oauth2/", - "oauth_provider": "https://%s/user/oauth2/" % conf_data["hostname"], - "redirect_uri": "https://%s/api/v0/oauth2/authorize" % conf_data["hostname"], -} +hostname = environ.get("CONF_HOSTNAME") or conf_data["hostname"] 
+config["OIDC_ISSUER"] = "https://%s/user" % hostname -config["USER_API"] = "http://fence-service/" # for use by authutils +config["USER_API"] = config["OIDC_ISSUER"] # for use by authutils # use the USER_API URL instead of the public issuer URL to accquire JWT keys config["FORCE_ISSUER"] = True app_init(app) diff --git a/helm/peregrine/templates/deployment.yaml b/helm/peregrine/templates/deployment.yaml index 9ae5340bd..c10e3e7b3 100644 --- a/helm/peregrine/templates/deployment.yaml +++ b/helm/peregrine/templates/deployment.yaml @@ -158,15 +158,10 @@ spec: value: "False" - name: CONF_HOSTNAME value: {{ .Values.global.hostname | quote }} + {{- with .Values.volumeMounts }} volumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/peregrine/settings.py" - subPath: "settings.py" - - name: "peregrine-volume" - readOnly: true - mountPath: "/var/www/peregrine/creds.json" - subPath: "creds.json" + {{- toYaml . | nindent 12 }} + {{- end }} ports: - name: http containerPort: 80 diff --git a/helm/peregrine/templates/peregrine-creds.yaml b/helm/peregrine/templates/peregrine-creds.yaml deleted file mode 100644 index 14d898ac0..000000000 --- a/helm/peregrine/templates/peregrine-creds.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: peregrine-creds -type: Opaque -stringData: - creds.json: |- - { - "db_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" "sheepdog" "context" $) }}", - "db_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" "sheepdog" "context" $) }}", - "db_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" "sheepdog" "context" $) }}", - "db_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" "sheepdog" "context" $)}}", - "hostname": "{{ .Values.global.hostname }}", - "indexd_password": "", - "fence_username": "{{include "gen3.service-postgres" (dict "key" "username" "service" "fence" "context" $) }}", - "fence_password": "{{include "gen3.service-postgres" (dict "key" "password" "service" "fence" "context" $) }}", - "fence_host": "{{ include "gen3.service-postgres" (dict "key" "host" "service" "fence" "context" $) }}", - "fence_database": "{{ include "gen3.service-postgres" (dict "key" "database" "service" "fence" "context" $)}}" - } diff --git a/helm/peregrine/values.yaml b/helm/peregrine/values.yaml index 06777b206..841462998 100644 --- a/helm/peregrine/values.yaml +++ b/helm/peregrine/values.yaml @@ -211,14 +211,11 @@ env: # -- (list) Volumes to attach to the container. volumes: -- name: shared-data - emptyDir: {} -- name: config-volume - secret: - secretName: "peregrine-secret" -- name: peregrine-volume - secret: - secretName: "peregrine-creds" + - name: shared-data + emptyDir: {} + - name: config-volume + secret: + secretName: "peregrine-secret" # -- (list) Volumes to mount to the container. volumeMounts: @@ -230,6 +227,11 @@ volumeMounts: readOnly: true mountPath: "peregrine/bin/settings.py" subPath: "settings.py" + - name: "config-volume" + readOnly: true + mountPath: "peregrine/bin/confighelper.py" + subPath: "config_helper.py" + # Values to determine the labels that are used for the deployment, pod, etc. # -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". 
diff --git a/helm/sheepdog/sheepdog-secret/config_helper.py b/helm/sheepdog/sheepdog-secret/config_helper.py index ab1805496..e53844eaa 100644 --- a/helm/sheepdog/sheepdog-secret/config_helper.py +++ b/helm/sheepdog/sheepdog-secret/config_helper.py @@ -1,9 +1,15 @@ +""" +Originally copied from `cloud-automation/apis_configs/config_helper.py` +(renamed `confighelper.py` so it isn't overwritten by the file that cloud-automation +still mounts for backwards compatibility). + +TODO: once everyone has this independent version of sheepdog, remove `wsgi.py` and +`config_helper.py` here: +https://github.com/uc-cdis/cloud-automation/blob/afb750d/kube/services/sheepdog/sheepdog-deploy.yaml#L166-L177 +""" + import json import os -import copy -import argparse -import re -import types # # make it easy to change this for testing @@ -43,334 +49,6 @@ def load_json(file_name, app_name, search_folders=None): """ actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: - return None + return {} with open(actual_files[0], "r") as reader: - return json.load(reader) - - -def inject_creds_into_fence_config(creds_file_path, config_file_path): - creds_file = open(creds_file_path, "r") - creds = json.load(creds_file) - creds_file.close() - - # get secret values from creds.json file - db_host = _get_nested_value(creds, "db_host") - db_username = _get_nested_value(creds, "db_username") - db_password = _get_nested_value(creds, "db_password") - db_database = _get_nested_value(creds, "db_database") - hostname = _get_nested_value(creds, "hostname") - indexd_password = environ.get('INDEXD_PASS') - google_client_secret = _get_nested_value(creds, "google_client_secret") - google_client_id = _get_nested_value(creds, "google_client_id") - hmac_key = _get_nested_value(creds, "hmac_key") - db_path = "postgresql://{}:{}@{}:5432/{}".format( - db_username, db_password, db_host, db_database - ) - - config_file = open(config_file_path, "r").read() - - print(" DB injected with value(s) from creds.json") - config_file = _replace(config_file, "DB", db_path) - - print(" BASE_URL injected with value(s) from creds.json") - config_file = _replace(config_file, "BASE_URL", "https://{}/user".format(hostname)) - - print(" INDEXD_PASSWORD injected with value(s) from creds.json") - config_file = _replace(config_file, "INDEXD_PASSWORD", indexd_password) - config_file = _replace(config_file, "INDEXD_USERNAME", "fence") - - print(" ENCRYPTION_KEY injected with value(s) from creds.json") - config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) - - print( - " OPENID_CONNECT/google/client_secret injected with value(s) " - "from creds.json" - ) - config_file = _replace( - config_file, "OPENID_CONNECT/google/client_secret", google_client_secret - ) - - print(" OPENID_CONNECT/google/client_id injected with value(s) from creds.json") - config_file = _replace( - config_file, "OPENID_CONNECT/google/client_id", google_client_id - ) - - open(config_file_path, "w+").write(config_file) - - -def set_prod_defaults(config_file_path): - config_file = open(config_file_path, "r").read() - - print( - " CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS set as " - "var/www/fence/fence_google_app_creds_secret.json" - ) - config_file = _replace( - config_file, - "CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS", - "/var/www/fence/fence_google_app_creds_secret.json", - ) - - print( - " CIRRUS_CFG/GOOGLE_STORAGE_CREDS set as " - "var/www/fence/fence_google_storage_creds_secret.json" - ) - config_file = _replace( - config_file, - 
"CIRRUS_CFG/GOOGLE_STORAGE_CREDS", - "/var/www/fence/fence_google_storage_creds_secret.json", - ) - - print(" INDEXD set as http://indexd-service/") - config_file = _replace(config_file, "INDEXD", "http://indexd-service/") - - print(" ARBORIST set as http://arborist-service/") - config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") - - print(" HTTP_PROXY/host set as cloud-proxy.internal.io") - config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") - - print(" HTTP_PROXY/port set as 3128") - config_file = _replace(config_file, "HTTP_PROXY/port", 3128) - - print(" DEBUG set to false") - config_file = _replace(config_file, "DEBUG", False) - - print(" MOCK_AUTH set to false") - config_file = _replace(config_file, "MOCK_AUTH", False) - - print(" MOCK_GOOGLE_AUTH set to false") - config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) - - print(" AUTHLIB_INSECURE_TRANSPORT set to true") - config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) - - print(" SESSION_COOKIE_SECURE set to true") - config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) - - print(" ENABLE_CSRF_PROTECTION set to true") - config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) - - open(config_file_path, "w+").write(config_file) - - -def inject_other_files_into_fence_config(other_files, config_file_path): - additional_cfgs = _get_all_additional_configs(other_files) - - config_file = open(config_file_path, "r").read() - - for key, value in additional_cfgs.iteritems(): - print(" {} set to {}".format(key, value)) - config_file = _nested_replace(config_file, key, value) - - open(config_file_path, "w+").write(config_file) - - -def _get_all_additional_configs(other_files): - """ - Attempt to parse given list of files and extract configuration variables and values - """ - additional_configs = dict() - for file_path in other_files: - try: - file_ext = file_path.strip().split(".")[-1] - if file_ext == "json": - json_file = open(file_path, "r") - configs = json.load(json_file) - json_file.close() - elif file_ext == "py": - configs = from_pyfile(file_path) - else: - print( - "Cannot load config vars from a file with extention: {}".format( - file_ext - ) - ) - except Exception as exc: - # if there's any issue reading the file, exit - print( - "Error reading {}. Cannot get configuration. Skipping this file. " - "Details: {}".format(other_files, str(exc)) - ) - continue - - if configs: - additional_configs.update(configs) - - return additional_configs - - -def _nested_replace(config_file, key, value, replacement_path=None): - replacement_path = replacement_path or key - try: - for inner_key, inner_value in value.iteritems(): - temp_path = replacement_path - temp_path = temp_path + "/" + inner_key - config_file = _nested_replace( - config_file, inner_key, inner_value, temp_path - ) - except AttributeError: - # not a dict so replace - if value is not None: - config_file = _replace(config_file, replacement_path, value) - - return config_file - - -def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0): - """ - Replace a nested value in a YAML file string with the given value without - losing comments. Uses a regex to do the replacement. - - Args: - yaml_config (str): a string representing a full configuration file - path_to_key (str): nested/path/to/key. 
The value of this key will be - replaced - replacement_value (str): Replacement value for the key from - path_to_key - """ - nested_path_to_replace = path_to_key.split("/") - - # our regex looks for a specific number of spaces to ensure correct - # level of nesting. It matches to the end of the line - search_string = ( - " " * nested_level + ".*" + nested_path_to_replace[0] + "(')?(\")?:.*\n" - ) - matches = re.search(search_string, yaml_config[start:]) - - # early return if we haven't found anything - if not matches: - return yaml_config - - # if we're on the last item in the path, we need to get the value and - # replace it in the original file - if len(nested_path_to_replace) == 1: - # replace the current key:value with the new replacement value - match_start = start + matches.start(0) + len(" " * nested_level) - match_end = start + matches.end(0) - yaml_config = ( - yaml_config[:match_start] - + "{}: {}\n".format( - nested_path_to_replace[0], - _get_yaml_replacement_value(replacement_value, nested_level), - ) - + yaml_config[match_end:] - ) - - return yaml_config - - # set new start point to past current match and move on to next match - start = matches.end(0) - nested_level += 1 - del nested_path_to_replace[0] - - return _replace( - yaml_config, - "/".join(nested_path_to_replace), - replacement_value, - start, - nested_level, - ) - - -def from_pyfile(filename, silent=False): - """ - Modeled after flask's ability to load in python files: - https://github.com/pallets/flask/blob/master/flask/config.py - - Some alterations were made but logic is essentially the same - """ - filename = os.path.abspath(filename) - d = types.ModuleType("config") - d.__file__ = filename - try: - with open(filename, mode="rb") as config_file: - exec(compile(config_file.read(), filename, "exec"), d.__dict__) - except IOError as e: - print("Unable to load configuration file ({})".format(e.strerror)) - if silent: - return False - raise - return _from_object(d) - - -def _from_object(obj): - configs = {} - for key in dir(obj): - if key.isupper(): - configs[key] = getattr(obj, key) - return configs - - -def _get_yaml_replacement_value(value, nested_level=0): - if isinstance(value, str): - return "'" + value + "'" - elif isinstance(value, bool): - return str(value).lower() - elif isinstance(value, list) or isinstance(value, set): - output = "" - for item in value: - # spaces for nested level then spaces and hyphen for each list item - output += ( - "\n" - + " " * nested_level - + " - " - + _get_yaml_replacement_value(item) - + "" - ) - return output - else: - return value - - -def _get_nested_value(dictionary, nested_path): - """ - Return a value from a dictionary given a path-like nesting of keys. - - Will default to an empty string if value cannot be found. 
- - Args: - dictionary (dict): a dictionary - nested_path (str): nested/path/to/key - - Returns: - ?: Value from dict - """ - replacement_value_path = nested_path.split("/") - replacement_value = copy.deepcopy(dictionary) - - for item in replacement_value_path: - replacement_value = replacement_value.get(item, {}) - - if replacement_value == {}: - replacement_value = "" - - return replacement_value - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "-i", - "--creds_file_to_inject", - default="creds.json", - help="creds file to inject into the configuration yaml", - ) - parser.add_argument( - "--other_files_to_inject", - nargs="+", - help="fence_credentials.json, local_settings.py, fence_settings.py file(s) to " - "inject into the configuration yaml", - ) - parser.add_argument( - "-c", "--config_file", default="config.yaml", help="configuration yaml" - ) - args = parser.parse_args() - - inject_creds_into_fence_config(args.creds_file_to_inject, args.config_file) - set_prod_defaults(args.config_file) - - if args.other_files_to_inject: - inject_other_files_into_fence_config( - args.other_files_to_inject, args.config_file - ) \ No newline at end of file + return json.load(reader) \ No newline at end of file diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 5d64d8e15..6ed845904 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -319,7 +319,7 @@ pcdcanalysistools: peregrine: image: repository: quay.io/pcdc/peregrine - tag: "1.3.10" + tag: "1.4.1" portal: #enabled: false From f12e764c64670f0bb088510d7a838ae7b332648f Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Jul 2025 16:52:29 -0700 Subject: [PATCH 086/126] Fix typos and improve Helm values documentation Corrected a typo in Chart.yaml and improved documentation for the pcdcanalysistools and amanuensis subcharts in values.yaml. Also reorganized and clarified the configuration structure for these subcharts. --- helm/gen3/Chart.yaml | 2 +- helm/gen3/values.yaml | 23 +++++++++-------------- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 3ebac313c..4672154a2 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -169,7 +169,7 @@ dependencies: # Application charts are a collection of templates that can be packaged into versioned archives # to be deployed. # -# Library charts pxrovide useful utilities or functions for the chart developer. They're included as +# Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. type: application diff --git a/helm/gen3/values.yaml b/helm/gen3/values.yaml index e01308ae8..3d99fdf3c 100644 --- a/helm/gen3/values.yaml +++ b/helm/gen3/values.yaml @@ -329,20 +329,6 @@ sheepdog: ssjdispatcher: # -- (bool) Whether to deploy the ssjdispatcher subchart. enabled: false - # -- (map) Docker image information. - image: - # -- (string) The Docker image repository for the ssjdispatcher service. - repository: - # -- (string) Overrides the image tag whose default is the chart appVersion. - tag: - -pcdcanalysistools: - enabled: true - - -amanuensis: - enabled: true - wts: # -- (bool) Whether to deploy the wts subchart. 
@@ -417,6 +403,15 @@ neuvector: # hostname/service name for our ElasitcSearch instance, used to allow egress from containers ES_HOST: gen3-elasticsearch-master +pcdcanalysistools: + # -- (bool) Whether to deploy the pcdcanalysistools subchart. + enabled: true + +amanuensis: + # -- (bool) Whether to deploy the amanuensis subchart. + enabled: true + + # -- (map) Secret information for External Secrets and DB Secrets. secrets: # -- (str) AWS access key ID. Overrides global key. From 7a28fa52793bc25b907152161c3166a075469b10 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 30 Jul 2025 17:39:16 -0700 Subject: [PATCH 087/126] Update JWT key handling and config in Fence Helm chart Refactors jwt-keys.yaml to create a Secret only when externalSecrets is not deployed or when createK8sJwtKeysSecret is true. Adds INDEXD_PASSWORD to environment variables, removes optional from yaml-merge volume, and cleans up FENCE_CONFIG formatting. Also sets AMANUENSIS_PUBLIC_KEY_PATH in pcdc-default-values.yaml. --- helm/fence/templates/jwt-keys.yaml | 14 +++++++++----- helm/fence/values.yaml | 13 ++++++++----- pcdc-default-values.yaml | 5 +++-- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/helm/fence/templates/jwt-keys.yaml b/helm/fence/templates/jwt-keys.yaml index 322abbf5b..06d10f288 100644 --- a/helm/fence/templates/jwt-keys.yaml +++ b/helm/fence/templates/jwt-keys.yaml @@ -1,5 +1,9 @@ -{{include "common.jwt-key-pair-secret" .}} ---- -{{include "common.jwt_public_key_setup_sa" .}} ---- -{{include "common.create_public_key_job" .}} \ No newline at end of file +{{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sJwtKeysSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: fence-jwt-keys +type: Opaque +data: + jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . }} +{{- end }} \ No newline at end of file diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index 458d3d7bf..15d580e87 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -374,6 +374,12 @@ env: optional: false - name: DB value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) + - name: INDEXD_PASSWORD + valueFrom: + secretKeyRef: + name: indexd-service-creds + key: fence + optional: true - name: gen3Env valueFrom: configMapKeyRef: @@ -417,7 +423,6 @@ volumes: - name: yaml-merge configMap: name: "fence-yaml-merge" - optional: true - name: amanuensis-jwt-keys secret: secretName: "amanuensis-jwt-keys" @@ -801,8 +806,6 @@ FENCE_CONFIG: # print(key) ENCRYPTION_KEY: REPLACEME - AMANUENSIS_PUBLIC_KEY_PATH: "/amanuensis/jwt_public_key.pem" - # -- (map) Debug and security settings # Modify based on whether you're in a dev environment or in production DEBUG: false @@ -858,7 +861,7 @@ FENCE_CONFIG: OPENID_CONNECT: # any OIDC IDP that does not differ from the generic implementation can be # configured without code changes - generic_oidc_idp: # choose a unique ID and replace this key + generic_oidc_idp: # choose a unique ID and replace this key # -- (str) Optional; display name for this IDP name: "some_idp" # -- (str) Client ID @@ -866,7 +869,7 @@ FENCE_CONFIG: # -- (str) Client secret client_secret: "" # -- (str) Redirect URL for this IDP - redirect_url: "{{BASE_URL}}/login/some_idp/login" # replace IDP name + redirect_url: "{{BASE_URL}}/login/some_idp/login" # replace IDP name # use `discovery` to configure IDPs that do not expose a discovery # endpoint. 
One of `discovery_url` or `discovery` should be configured # -- (str) URL of the OIDC discovery endpoint for the IDP diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 6ed845904..b9ae6c1e0 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -73,11 +73,12 @@ fence: DEBUG: true MOCK_STORAGE: true #fill in - #AMANUENSIS_PUBLIC_KEY_PATH: '/fence/keys/key/jwt_public_key.pem' + AMANUENSIS_PUBLIC_KEY_PATH: "/amanuensis/jwt_public_key.pem" MOCK_GOOGLE_AUTH: true mock_default_user: "test@example.com" #LOGIN_REDIRECT_WHITELIST: ["https://localhost:9443/", "http://localhost:9443/"] - + podSecurityContext: + {} image: repository: "quay.io/pcdc/fence" tag: "helm-test" From 3f40fddbf5cb25e5b2a5292c4ec97ff6d84a55ec Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 31 Jul 2025 11:38:13 -0700 Subject: [PATCH 088/126] Refactor pcdcanalysistools Helm chart and secrets handling Major refactor of the pcdcanalysistools Helm chart: updates Chart version to 1.0.0, improves config and secret handling in Python, removes legacy templates, adds new templates for AWS config, network policy, PDB, and secret store, and updates deployment, HPA, and service templates for better configuration and flexibility. Also updates values.yaml with expanded documentation and options, and improves settings.py to use secrets from creds.json. This prepares the chart for more robust, production-ready deployments and simplifies secret management. --- helm/gen3/Chart.yaml | 2 +- helm/pcdcanalysistools/Chart.yaml | 6 +- .../pcdcanalysistools-secret/confighelper.py | 456 +----------------- .../pcdcanalysistools-secret/settings.py | 115 ++--- helm/pcdcanalysistools/templates/NOTES.txt | 2 +- helm/pcdcanalysistools/templates/_helpers.tpl | 24 + .../templates/aws-config.yaml | 3 + .../templates/deployment.yaml | 221 +++------ helm/pcdcanalysistools/templates/hpa.yaml | 40 +- .../templates/netpolicy.yaml | 2 + .../templates/pcdcanalysistools-creds.yaml | 8 - helm/pcdcanalysistools/templates/pdb.yaml | 3 + .../templates/secret-store.yaml | 3 + helm/pcdcanalysistools/templates/service.yaml | 2 +- .../templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 - helm/pcdcanalysistools/values.yaml | 290 +++++++---- helm/sheepdog/templates/deployment.yaml | 4 - pcdc-default-values.yaml | 2 +- 19 files changed, 397 insertions(+), 813 deletions(-) create mode 100644 helm/pcdcanalysistools/templates/aws-config.yaml create mode 100644 helm/pcdcanalysistools/templates/netpolicy.yaml delete mode 100644 helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml create mode 100644 helm/pcdcanalysistools/templates/pdb.yaml create mode 100644 helm/pcdcanalysistools/templates/secret-store.yaml delete mode 100644 helm/pcdcanalysistools/templates/serviceaccount.yaml delete mode 100644 helm/pcdcanalysistools/templates/tests/test-connection.yaml diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index b47297195..20a98f085 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -112,7 +112,7 @@ dependencies: repository: "file://../wts" condition: wts.enabled - name: pcdcanalysistools - version: "0.1.0" + version: "1.0.0" repository: "file://../pcdcanalysistools" condition: pcdcanalysistools.enabled - name: amanuensis diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 832342e78..7b7fc61af 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: pcdcanalysistools -description: A 
Helm chart for Kubernetes +description: A Helm chart for gen3 pcdcanalysistools Service # A chart can be either an 'application' or a 'library' chart. # @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 1.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.16.0" +appVersion: "master" dependencies: - name: common diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py index 869ca25af..05c929e2d 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/confighelper.py @@ -1,9 +1,15 @@ +""" +Originally copied from `cloud-automation/apis_configs/config_helper.py` +(renamed `confighelper.py` so it isn't overwritten by the file that cloud-automation +still mounts for backwards compatibility). + +TODO: once everyone has this independent version of PcdcAnalysisTools, remove `wsgi.py` and +`config_helper.py` here: +https://github.com/uc-cdis/cloud-automation/blob/afb750d/kube/services/PcdcAnalysisTools/PcdcAnalysisTools-deploy.yaml#L166-L177 +""" + import json import os -import copy -import argparse -import re -import types # # make it easy to change this for testing @@ -43,444 +49,6 @@ def load_json(file_name, app_name, search_folders=None): """ actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: - return None + return {} with open(actual_files[0], "r") as reader: - return json.load(reader) - - -def inject_creds_into_fence_config(creds_file_path, config_file_path): - creds_file = open(creds_file_path, "r") - creds = json.load(creds_file) - creds_file.close() - - # get secret values from creds.json file - db_host = _get_nested_value(creds, "db_host") - db_username = _get_nested_value(creds, "db_username") - db_password = _get_nested_value(creds, "db_password") - db_database = _get_nested_value(creds, "db_database") - hostname = _get_nested_value(creds, "hostname") - indexd_password = _get_nested_value(creds, "indexd_password") - google_client_secret = _get_nested_value(creds, "google_client_secret") - google_client_id = _get_nested_value(creds, "google_client_id") - hmac_key = _get_nested_value(creds, "hmac_key") - db_path = "postgresql://{}:{}@{}:5432/{}".format( - db_username, db_password, db_host, db_database - ) - - config_file = open(config_file_path, "r").read() - - print(" DB injected with value(s) from creds.json") - config_file = _replace(config_file, "DB", db_path) - - print(" BASE_URL injected with value(s) from creds.json") - config_file = _replace(config_file, "BASE_URL", "https://{}/user".format(hostname)) - - print(" INDEXD_PASSWORD injected with value(s) from creds.json") - config_file = _replace(config_file, "INDEXD_PASSWORD", indexd_password) - config_file = _replace(config_file, "INDEXD_USERNAME", "fence") - - print(" ENCRYPTION_KEY injected with value(s) from creds.json") - config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) - - print( - " 
OPENID_CONNECT/google/client_secret injected with value(s) " - "from creds.json" - ) - config_file = _replace( - config_file, "OPENID_CONNECT/google/client_secret", google_client_secret - ) - - print(" OPENID_CONNECT/google/client_id injected with value(s) from creds.json") - config_file = _replace( - config_file, "OPENID_CONNECT/google/client_id", google_client_id - ) - - open(config_file_path, "w+").write(config_file) - -def inject_creds_into_amanuensis_config(creds_file_path, config_file_path): - creds_file = open(creds_file_path, "r") - creds = json.load(creds_file) - creds_file.close() - - # get secret values from creds.json file - db_host = _get_nested_value(creds, "db_host") - db_username = _get_nested_value(creds, "db_username") - db_password = _get_nested_value(creds, "db_password") - db_database = _get_nested_value(creds, "db_database") - hostname = _get_nested_value(creds, "hostname") - data_delivery_bucket = _get_nested_value(creds, "data_delivery_bucket") - data_delivery_bucket_aws_key_id = _get_nested_value(creds, "data_delivery_bucket_aws_key_id") - data_delivery_bucket_aws_access_key = _get_nested_value(creds, "data_delivery_bucket_aws_access_key") - csl_key = _get_nested_value(creds, "csl_key") - - db_path = "postgresql://{}:{}@{}:5432/{}".format( - db_username, db_password, db_host, db_database - ) - - config_file = open(config_file_path, "r").read() - - print(" DB injected with value(s) from creds.json") - config_file = _replace(config_file, "DB", db_path) - - print(" BASE_URL injected with value(s) from creds.json") - config_file = _replace(config_file, "BASE_URL", "https://{}/amanuensis".format(hostname)) - - print(" HOSTNAME injected with value(s) from creds.json") - config_file = _replace(config_file, "HOSTNAME", "{}".format(hostname)) - - print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id injected with value(s) from creds.json") - config_file = _replace( - config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_access_key_id", data_delivery_bucket_aws_key_id - ) - - print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key injected with value(s) from creds.json") - config_file = _replace( - config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/aws_secret_access_key", data_delivery_bucket_aws_access_key - ) - - print(" AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name injected with value(s) from creds.json") - config_file = _replace( - config_file, "AWS_CREDENTIALS/DATA_DELIVERY_S3_BUCKET/bucket_name", data_delivery_bucket - ) - - print(" CSL_KEY injected with value(s) from creds.json") - config_file = _replace( - config_file, "CSL_KEY", csl_key - ) - - # modify USER_API to http://user-service/ if hostname is localhost - - if hostname == "localhost": - print(" USER_API set to http://fence-service/") - config_file = _replace(config_file, "USER_API", "http://fence-service/") - # print(" ENCRYPTION_KEY injected with value(s) from creds.json") - # config_file = _replace(config_file, "ENCRYPTION_KEY", hmac_key) - - - open(config_file_path, "w+").write(config_file) - - -def set_prod_defaults(config_file_path): - config_file = open(config_file_path, "r").read() - - print( - " CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS set as " - "var/www/fence/fence_google_app_creds_secret.json" - ) - config_file = _replace( - config_file, - "CIRRUS_CFG/GOOGLE_APPLICATION_CREDENTIALS", - "/var/www/fence/fence_google_app_creds_secret.json", - ) - - print( - " CIRRUS_CFG/GOOGLE_STORAGE_CREDS set as " - "var/www/fence/fence_google_storage_creds_secret.json" - ) - 
config_file = _replace( - config_file, - "CIRRUS_CFG/GOOGLE_STORAGE_CREDS", - "/var/www/fence/fence_google_storage_creds_secret.json", - ) - - print(" INDEXD set as http://indexd-service/") - config_file = _replace(config_file, "INDEXD", "http://indexd-service/") - - print(" ARBORIST set as http://arborist-service/") - config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") - - print(" HTTP_PROXY/host set as cloud-proxy.internal.io") - config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") - - print(" HTTP_PROXY/port set as 3128") - config_file = _replace(config_file, "HTTP_PROXY/port", 3128) - - print(" DEBUG set to false") - config_file = _replace(config_file, "DEBUG", False) - - print(" MOCK_AUTH set to false") - config_file = _replace(config_file, "MOCK_AUTH", False) - - print(" MOCK_GOOGLE_AUTH set to false") - config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) - - print(" AUTHLIB_INSECURE_TRANSPORT set to true") - config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) - - print(" SESSION_COOKIE_SECURE set to true") - config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) - - print(" ENABLE_CSRF_PROTECTION set to true") - config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) - - open(config_file_path, "w+").write(config_file) - -def set_prod_defaults_amanuensis(config_file_path): - config_file = open(config_file_path, "r").read() - - print(" INDEXD set as http://indexd-service/") - config_file = _replace(config_file, "INDEXD", "http://indexd-service/") - - print(" ARBORIST set as http://arborist-service/") - config_file = _replace(config_file, "ARBORIST", "http://arborist-service/") - - print(" HTTP_PROXY/host set as cloud-proxy.internal.io") - config_file = _replace(config_file, "HTTP_PROXY/host", "cloud-proxy.internal.io") - - print(" HTTP_PROXY/port set as 3128") - config_file = _replace(config_file, "HTTP_PROXY/port", 3128) - - print(" DEBUG set to false") - config_file = _replace(config_file, "DEBUG", False) - - print(" MOCK_AUTH set to false") - config_file = _replace(config_file, "MOCK_AUTH", False) - - print(" MOCK_GOOGLE_AUTH set to false") - config_file = _replace(config_file, "MOCK_GOOGLE_AUTH", False) - - print(" AUTHLIB_INSECURE_TRANSPORT set to true") - config_file = _replace(config_file, "AUTHLIB_INSECURE_TRANSPORT", True) - - print(" SESSION_COOKIE_SECURE set to true") - config_file = _replace(config_file, "SESSION_COOKIE_SECURE", True) - - print(" ENABLE_CSRF_PROTECTION set to true") - config_file = _replace(config_file, "ENABLE_CSRF_PROTECTION", True) - - open(config_file_path, "w+").write(config_file) - -def inject_other_files_into_fence_config(other_files, config_file_path): - additional_cfgs = _get_all_additional_configs(other_files) - - config_file = open(config_file_path, "r").read() - - for key, value in additional_cfgs.iteritems(): - print(" {} set to {}".format(key, value)) - config_file = _nested_replace(config_file, key, value) - - open(config_file_path, "w+").write(config_file) - - -def _get_all_additional_configs(other_files): - """ - Attempt to parse given list of files and extract configuration variables and values - """ - additional_configs = dict() - for file_path in other_files: - try: - file_ext = file_path.strip().split(".")[-1] - if file_ext == "json": - json_file = open(file_path, "r") - configs = json.load(json_file) - json_file.close() - elif file_ext == "py": - configs = from_pyfile(file_path) - else: - print( - "Cannot load config vars from a 
file with extention: {}".format( - file_ext - ) - ) - except Exception as exc: - # if there's any issue reading the file, exit - print( - "Error reading {}. Cannot get configuration. Skipping this file. " - "Details: {}".format(other_files, str(exc)) - ) - continue - - if configs: - additional_configs.update(configs) - - return additional_configs - - -def _nested_replace(config_file, key, value, replacement_path=None): - replacement_path = replacement_path or key - try: - for inner_key, inner_value in value.iteritems(): - temp_path = replacement_path - temp_path = temp_path + "/" + inner_key - config_file = _nested_replace( - config_file, inner_key, inner_value, temp_path - ) - except AttributeError: - # not a dict so replace - if value is not None: - config_file = _replace(config_file, replacement_path, value) - - return config_file - - -def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0, key_only=False): - """ - Replace a nested value in a YAML file string with the given value without - losing comments. Uses a regex to do the replacement. - - Args: - yaml_config (str): a string representing a full configuration file - path_to_key (str): nested/path/to/key. The value of this key will be - replaced - replacement_value (str): Replacement value for the key from - path_to_key - """ - nested_path_to_replace = path_to_key.split("/") - - # our regex looks for a specific number of spaces to ensure correct - # level of nesting. It matches to the end of the line - search_string = ( - " " * nested_level + ".*" + nested_path_to_replace[0] + "(')?(\")?:.*\n" - ) - matches = re.search(search_string, yaml_config[start:]) - - # early return if we haven't found anything - if not matches: - return yaml_config - - # if we're on the last item in the path, we need to get the value and - # replace it in the original file - if len(nested_path_to_replace) == 1: - # replace the current key:value with the new replacement value - match_start = start + matches.start(0) + len(" " * nested_level) - match_end = start + matches.end(0) - if not key_only: - yaml_config = ( - yaml_config[:match_start] - + "{}: {}\n".format( - nested_path_to_replace[0], - _get_yaml_replacement_value(replacement_value, nested_level), - ) - + yaml_config[match_end:] - ) - else: - yaml_config = ( - yaml_config[:match_start] - + "{}:\n".format( - _get_yaml_replacement_value(replacement_value, nested_level), - ) - + yaml_config[match_end:] - ) - - return yaml_config - - # set new start point to past current match and move on to next match - start = start + matches.end(0) - nested_level += 1 - del nested_path_to_replace[0] - - return _replace( - yaml_config, - "/".join(nested_path_to_replace), - replacement_value, - start, - nested_level, - key_only=key_only, - ) - - -def from_pyfile(filename, silent=False): - """ - Modeled after flask's ability to load in python files: - https://github.com/pallets/flask/blob/master/flask/config.py - - Some alterations were made but logic is essentially the same - """ - filename = os.path.abspath(filename) - d = types.ModuleType("config") - d.__file__ = filename - try: - with open(filename, mode="rb") as config_file: - exec(compile(config_file.read(), filename, "exec"), d.__dict__) - except IOError as e: - print("Unable to load configuration file ({})".format(e.strerror)) - if silent: - return False - raise - return _from_object(d) - - -def _from_object(obj): - configs = {} - for key in dir(obj): - if key.isupper(): - configs[key] = getattr(obj, key) - return configs - - -def 
_get_yaml_replacement_value(value, nested_level=0): - if isinstance(value, str): - return "'" + value + "'" - elif isinstance(value, bool): - return str(value).lower() - elif isinstance(value, list) or isinstance(value, set): - output = "" - for item in value: - # spaces for nested level then spaces and hyphen for each list item - output += ( - "\n" - + " " * nested_level - + " - " - + _get_yaml_replacement_value(item) - + "" - ) - return output - else: - return value - - -def _get_nested_value(dictionary, nested_path): - """ - Return a value from a dictionary given a path-like nesting of keys. - - Will default to an empty string if value cannot be found. - - Args: - dictionary (dict): a dictionary - nested_path (str): nested/path/to/key - - Returns: - ?: Value from dict - """ - replacement_value_path = nested_path.split("/") - replacement_value = copy.deepcopy(dictionary) - - for item in replacement_value_path: - replacement_value = replacement_value.get(item, {}) - - if replacement_value == {}: - replacement_value = "" - - return replacement_value - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "-i", - "--creds_file_to_inject", - default="creds.json", - help="creds file to inject into the configuration yaml", - ) - parser.add_argument( - "--other_files_to_inject", - nargs="+", - help="fence_credentials.json, local_settings.py, fence_settings.py file(s) to " - "inject into the configuration yaml", - ) - parser.add_argument( - "-c", "--config_file", default="config.yaml", help="configuration yaml" - ) - args = parser.parse_args() - - if args.config_file == "new-amanuensis-config.yaml": - inject_creds_into_amanuensis_config(args.creds_file_to_inject, args.config_file) - set_prod_defaults_amanuensis(args.config_file) - else: - inject_creds_into_fence_config(args.creds_file_to_inject, args.config_file) - set_prod_defaults(args.config_file) - - if args.other_files_to_inject: - inject_other_files_into_fence_config( - args.other_files_to_inject, args.config_file - ) + return json.load(reader) \ No newline at end of file diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py index 75efccd80..6a3be935d 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -1,81 +1,61 @@ from PcdcAnalysisTools.api import app, app_init from os import environ -#import config_helper +import bin.confighelper as confighelper from pcdcutils.environment import is_env_enabled APP_NAME='PcdcAnalysisTools' -# def load_json(file_name): -# return config_helper.load_json(file_name, APP_NAME) +def load_json(file_name): + return confighelper.load_json(file_name, APP_NAME) -# conf_data = load_json("creds.json") +conf_data = load_json("creds.json") config = app.config -config['SERVICE_NAME'] = 'pcdcanalysistools' -config['PRIVATE_KEY_PATH'] = "/var/www/PcdcAnalysisTools/jwt_private_key.pem" - -config["AUTH"] = "https://auth.service.consul:5000/v3/" -config["AUTH_ADMIN_CREDS"] = None -config["INTERNAL_AUTH"] = None - # ARBORIST deprecated, replaced by ARBORIST_URL # ARBORIST_URL is initialized in app_init() directly config["ARBORIST"] = "http://arborist-service/" -# Signpost: deprecated, replaced by index client. 
-config["SIGNPOST"] = { - "host": environ.get("SIGNPOST_HOST") or "http://indexd-service", - "version": "v0", - "auth": ("gdcapi", environ.get( "INDEXD_PASS") ), -} config["INDEX_CLIENT"] = { "host": environ.get("INDEX_CLIENT_HOST") or "http://indexd-service", "version": "v0", - "auth": ("gdcapi", environ.get( "INDEXD_PASS") ), + # The user should be "sheepdog", but for legacy reasons, we use "gdcapi" instead + "auth": ( + ( + environ.get("INDEXD_USER", "gdcapi"), + environ.get("INDEXD_PASS") + or conf_data.get("indexd_password", "{{indexd_password}}"), + ) + ), } -config["FAKE_AUTH"] = False + config["PSQLGRAPH"] = { - 'host': environ.get( "PGHOST"), - 'user': environ.get( "PGUSER"), - 'password': environ.get( "PGPASSWORD"), - 'database': environ.get( "PGDB"), + "host": conf_data.get("db_host", environ.get("PGHOST", "localhost")), + "user": conf_data.get("db_username", environ.get("PGUSER", "sheepdog")), + "password": conf_data.get("db_password", environ.get("PGPASSWORD", "sheepdog")), + "database": conf_data.get("db_database", environ.get("PGDB", "sheepdog")), } -# config["HMAC_ENCRYPTION_KEY"] = conf_data.get("hmac_key", "{{hmac_key}}") -# config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") -config["HMAC_ENCRYPTION_KEY"] = environ.get( "HMAC_ENCRYPTION_KEY") -config["FLASK_SECRET_KEY"] = environ.get( "FLASK_SECRET_KEY") -fence_username = environ.get( "FENCE_DB_USER") -fence_password = environ.get( "FENCE_DB_PASS") -fence_host = environ.get( "FENCE_DB_HOST") -fence_database = environ.get( "FENCE_DB_DBNAME") -config['PSQL_USER_DB_CONNECTION'] = 'postgresql://%s:%s@%s:5432/%s' % (fence_username, fence_password, fence_host, fence_database) - -hostname = environ.get("CONF_HOSTNAME", "localhost") -config['OIDC_ISSUER'] = 'https://%s/user' % hostname - -config["OAUTH2"] = { - "client_id": 'conf_data.get("oauth2_client_id", "{{oauth2_client_id}}")', - "client_secret": 'conf_data.get("oauth2_client_secret", "{{oauth2_client_secret}}")', - "api_base_url": "https://%s/user/" % hostname, - "authorize_url": "https://%s/user/oauth2/authorize" % hostname, - "access_token_url": "https://%s/user/oauth2/token" % hostname, - "refresh_token_url": "https://%s/user/oauth2/token" % hostname, - "client_kwargs": { - "redirect_uri": "https://%s/api/v0/oauth2/authorize" % hostname, - "scope": "openid data user", - }, - # deprecated key values, should be removed after all commons use new oidc - "internal_oauth_provider": "http://fence-service/oauth2/", - "oauth_provider": "https://%s/user/oauth2/" % hostname, - "redirect_uri": "https://%s/api/v0/oauth2/authorize" % hostname, -} - -# trailing slash intentionally omitted -config['GUPPY_API'] = 'http://guppy-service' - -config["USER_API"] = 'http://fence-service/' # for use by authutils -config["FENCE"] = 'http://fence-service' +config["FLASK_SECRET_KEY"] = conf_data.get("gdcapi_secret_key", "{{gdcapi_secret_key}}") +fence_username = conf_data.get( + "fence_username", environ.get("FENCE_DB_USER", "fence") +) +fence_password = conf_data.get( + "fence_password", environ.get("FENCE_DB_PASS", "fence") +) +fence_host = conf_data.get("fence_host", environ.get("FENCE_DB_HOST", "localhost")) +fence_database = conf_data.get( + "fence_database", environ.get("FENCE_DB_DATABASE", "fence") +) +config["PSQL_USER_DB_CONNECTION"] = "postgresql://%s:%s@%s:5432/%s" % ( + fence_username, + fence_password, + fence_host, + fence_database, +) + +config["USER_API"] = "https://%s/user" % conf_data.get( + "hostname", environ.get("CONF_HOSTNAME", 
"localhost") +) # for use by authutils # config['USER_API'] = 'http://fence-service/' # option to force authutils to prioritize USER_API setting over the issuer from @@ -83,10 +63,21 @@ # services are on different containers but the hostname is still localhost config['FORCE_ISSUER'] = True -if environ.get('DICTIONARY_URL'): - config['DICTIONARY_URL'] = environ.get('DICTIONARY_URL') -else: - config['PATH_TO_SCHEMA_DIR'] = environ.get('PATH_TO_SCHEMA_DIR') +config["DICTIONARY_URL"] = environ.get( + "DICTIONARY_URL", + "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json", +) + +# trailing slash intentionally omitted +config['GUPPY_API'] = 'http://guppy-service' +config["FENCE"] = 'http://fence-service' +config['SERVICE_NAME'] = 'pcdcanalysistools' +config['PRIVATE_KEY_PATH'] = "/var/www/PcdcAnalysisTools/jwt_private_key.pem" +config["AUTH"] = "https://auth.service.consul:5000/v3/" +config["AUTH_ADMIN_CREDS"] = None +config["INTERNAL_AUTH"] = None +config["FAKE_AUTH"] = False +config["HMAC_ENCRYPTION_KEY"] = conf_data.get("hmac_key", environ.get("HMAC_ENCRYPTION_KEY")) config['SURVIVAL'] = { 'consortium': ["INSTRuCT", "INRG", "MaGIC", "NODAL"], diff --git a/helm/pcdcanalysistools/templates/NOTES.txt b/helm/pcdcanalysistools/templates/NOTES.txt index 70b82c54c..c1e7e1aef 100644 --- a/helm/pcdcanalysistools/templates/NOTES.txt +++ b/helm/pcdcanalysistools/templates/NOTES.txt @@ -1 +1 @@ -{{ .Chart.Name }} has been deployed \ No newline at end of file +{{ .Chart.Name }} has been deployed successfully. diff --git a/helm/pcdcanalysistools/templates/_helpers.tpl b/helm/pcdcanalysistools/templates/_helpers.tpl index 6b4035d8d..7413ac498 100644 --- a/helm/pcdcanalysistools/templates/_helpers.tpl +++ b/helm/pcdcanalysistools/templates/_helpers.tpl @@ -66,3 +66,27 @@ Create the name of the service account to use {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} + + +{{/* + Postgres Password lookup Fence +*/}} +{{- define "fence.postgres.password" -}} +{{- $localpass := (lookup "v1" "Secret" "postgres" "postgres-postgresql" ) -}} +{{- if $localpass }} +{{- default (index $localpass.data "postgres-password" | b64dec) }} +{{- else }} +{{- default .Values.secrets.fence.password }} +{{- end }} +{{- end }} + +# {{/* +# Define dictionaryUrl +# */}} +# {{- define "pcdcanalysistools.dictionaryUrl" -}} +# {{- if .Values.global }} +# {{- .Values.global.dictionaryUrl }} +# {{- else}} +# {{- .Values.dictionaryUrl }} +# {{- end }} +# {{- end }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/aws-config.yaml b/helm/pcdcanalysistools/templates/aws-config.yaml new file mode 100644 index 000000000..a49fb9f1b --- /dev/null +++ b/helm/pcdcanalysistools/templates/aws-config.yaml @@ -0,0 +1,3 @@ +{{- if or (.Values.secrets.awsSecretAccessKey) (.Values.global.aws.awsSecretAccessKey) (.Values.global.aws.externalSecrets.enabled) }} +{{ include "common.awsconfig" . }} +{{- end -}} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index 0c3937ac2..db06e7753 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -2,11 +2,12 @@ apiVersion: apps/v1 kind: Deployment metadata: name: pcdcanalysistools-deployment + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} labels: {{- include "pcdcanalysistools.labels" . 
| nindent 4 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 4 }} - {{- end }} spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} @@ -14,156 +15,42 @@ spec: selector: matchLabels: {{- include "pcdcanalysistools.selectorLabels" . | nindent 6 }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} template: metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} labels: + # gen3 networkpolicy labels + public: "yes" + netnolimit: "yes" + s3: "yes" {{- include "pcdcanalysistools.selectorLabels" . | nindent 8 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 8 }} + {{- include "common.extraLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/pcdcanalysistools-secret.yaml") . | sha256sum }} + {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- include "common.grafanaAnnotations" . | nindent 8 }} {{- end }} spec: - volumes: - - name: config-volume - secret: - secretName: "pcdcanalysistools-secret" - - name: pcdcanalysistools-jwt-keys - secret: - secretName: "pcdcanalysistools-jwt-keys" - items: - - key: jwt_private_key.pem - path: jwt_private_key.pem - - name: config-helper - secret: - secretName: "pcdcanalysistools-secret" - - name: creds-volume - secret: - secretName: "sheepdog-creds" - - - {{- with .Values.imagePullSecrets }} - imagePullSecrets: + {{- with .Values.affinity }} + affinity: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "pcdcanalysistools.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- with .Values.volumes }} + volumes: + {{- toYaml . 
| nindent 10 }} + {{- end }} containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} + - name: pcdcanalysistools image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: CONF_HOSTNAME - value: {{ .Values.global.hostname }} - - name: FENCE_DB_USER - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: username - optional: false - - name: FENCE_DB_PASS - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: password - optional: false - - name: FENCE_DB_HOST - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: host - optional: false - - name: FENCE_DB_DBNAME - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: database - optional: false - - name: DICTIONARY_URL - valueFrom: - configMapKeyRef: - name: manifest-global - key: dictionary_url - - name: SIGNPOST_HOST - valueFrom: - configMapKeyRef: - name: manifest-global - key: indexd_url - optional: true - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: sheepdog-dbcreds - key: password - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: sheepdog-dbcreds - key: database - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: sheepdog-dbcreds - key: username - optional: false - - name: PGHOST - valueFrom: - secretKeyRef: - name: sheepdog-dbcreds - key: host - optional: false - - name: INDEX_CLIENT_HOST - valueFrom: - configMapKeyRef: - name: manifest-global - key: indexd_url - optional: true - - name: FENCE_URL - valueFrom: - configMapKeyRef: - name: manifest-global - key: fence_url - optional: true - - name: ARBORIST_URL - value: http://arborist-service - - name: AUTH_NAMESPACE - value: default - - name: REQUESTS_CA_BUNDLE - value: /etc/ssl/certs/ca-certificates.crt - - name: GEN3_DEBUG - value: "False" - volumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/PcdcAnalysisTools/wsgi.py" - subPath: "settings.py" - - name: "config-volume" - readOnly: true - mountPath: "/PcdcAnalysisTools/bin/settings.py" - subPath: "settings.py" - - name: "pcdcanalysistools-jwt-keys" - readOnly: true - mountPath: "/var/www/PcdcAnalysisTools/jwt_private_key.pem" - subPath: "jwt_private_key.pem" - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/PcdcAnalysisTools/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/PcdcAnalysisTools/bin/config_helper.py" - subPath: confighelper.py ports: - - name: http - containerPort: 80 - protocol: TCP + - containerPort: 80 + - containerPort: 443 livenessProbe: httpGet: path: /_status?timeout=20 @@ -172,20 +59,46 @@ spec: periodSeconds: 60 timeoutSeconds: 30 readinessProbe: + initialDelaySeconds: 30 httpGet: path: /_status?timeout=2 - port: http + port: 80 + env: + - name: CONF_HOSTNAME + value: {{ .Values.global.hostname }} + - name: DICTIONARY_URL + value: {{ default "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json" .Values.global.dictionaryUrl | quote }} + - name: INDEX_CLIENT_HOST + value: {{ default "http://indexd-service" .Values.global.indexdURL | quote }} + {{- if eq .Values.global.dev false }} + - name: FENCE_URL + value: https://{{ .Values.global.hostname }}/user + {{- else }} + - name: FENCE_URL + value: {{ default "http://fence-service" .Values.global.fenceURL | quote }} + {{- end }} + - name: ARBORIST_URL + value: {{ default "http://arborist-service" .Values.global.arboristURL | quote }} + {{- with 
.Values.authNamespace }} + - name: AUTH_NAMESPACE + value: {{ . }} + {{- end }} + # - name: REQUESTS_CA_BUNDLE + # # + # # override python 'requests' SSL certificate bundle + # # to use system trusted certs + # # which includes our private certificate authority + # # + # value: /etc/ssl/certs/ca-certificates.crt + - name: GEN3_DEBUG + value: "True" + {{- with .Values.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 10 }} + {{- end }} resources: {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} \ No newline at end of file + # command: ["/bin/bash" ] + # args: + # - "-c" + # - "sleep infinity" \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/hpa.yaml b/helm/pcdcanalysistools/templates/hpa.yaml index ddc5e5225..94f298246 100644 --- a/helm/pcdcanalysistools/templates/hpa.yaml +++ b/helm/pcdcanalysistools/templates/hpa.yaml @@ -1,32 +1,32 @@ -{{- if .Values.autoscaling.enabled }} +{{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "pcdcanalysistools.fullname" . }} + name: pcdcanalysistools-deployment labels: {{- include "pcdcanalysistools.labels" . | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "pcdcanalysistools.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} + name: pcdcanalysistools-deployment + minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} + maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + type: Utilization {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/netpolicy.yaml b/helm/pcdcanalysistools/templates/netpolicy.yaml new file mode 100644 index 000000000..2fb372551 --- /dev/null +++ b/helm/pcdcanalysistools/templates/netpolicy.yaml @@ -0,0 +1,2 @@ +#I believe this allows a service to connect to a DB but not sure if pcdcanalysistools needs to be able to connect to the DB +{{ 
include "common.db_netpolicy" . }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml b/helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml deleted file mode 100644 index 6d9b6a53a..000000000 --- a/helm/pcdcanalysistools/templates/pcdcanalysistools-creds.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: pcdcanalysistools-creds -type: Opaque -stringData: - creds.json: |- - "" \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/pdb.yaml b/helm/pcdcanalysistools/templates/pdb.yaml new file mode 100644 index 000000000..2ef2de13d --- /dev/null +++ b/helm/pcdcanalysistools/templates/pdb.yaml @@ -0,0 +1,3 @@ +{{- if and .Values.global.pdb (gt (int .Values.replicaCount) 1) }} +{{ include "common.pod_disruption_budget" . }} +{{- end }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/secret-store.yaml b/helm/pcdcanalysistools/templates/secret-store.yaml new file mode 100644 index 000000000..771c7760d --- /dev/null +++ b/helm/pcdcanalysistools/templates/secret-store.yaml @@ -0,0 +1,3 @@ +{{ if .Values.global.externalSecrets.separateSecretStore }} +{{ include "common.secretstore" . }} +{{- end }} \ No newline at end of file diff --git a/helm/pcdcanalysistools/templates/service.yaml b/helm/pcdcanalysistools/templates/service.yaml index 8fff1ae27..678d878fb 100644 --- a/helm/pcdcanalysistools/templates/service.yaml +++ b/helm/pcdcanalysistools/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: http + targetPort: {{ .Values.service.port }} protocol: TCP name: http selector: diff --git a/helm/pcdcanalysistools/templates/serviceaccount.yaml b/helm/pcdcanalysistools/templates/serviceaccount.yaml deleted file mode 100644 index cf8465e5b..000000000 --- a/helm/pcdcanalysistools/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "pcdcanalysistools.serviceAccountName" . }} - labels: - {{- include "pcdcanalysistools.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/helm/pcdcanalysistools/templates/tests/test-connection.yaml b/helm/pcdcanalysistools/templates/tests/test-connection.yaml deleted file mode 100644 index 12364747b..000000000 --- a/helm/pcdcanalysistools/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "pcdcanalysistools.fullname" . }}-test-connection" - labels: - {{- include "pcdcanalysistools.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "pcdcanalysistools.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/helm/pcdcanalysistools/values.yaml b/helm/pcdcanalysistools/values.yaml index 7bd08e26a..08adb21a0 100644 --- a/helm/pcdcanalysistools/values.yaml +++ b/helm/pcdcanalysistools/values.yaml @@ -1,21 +1,31 @@ # Default values for pcdcanalysistools. # This is a YAML-formatted file. # Declare variables to be passed into your templates. + + +# Global configuration global: # -- (map) AWS configuration aws: # -- (bool) Set to true if deploying to AWS. Controls ingress annotations. enabled: false # -- (string) Credentials for AWS stuff. 
- awsAccessKeyId: + awsAccessKeyId: # -- (string) Credentials for AWS stuff. - awsSecretAccessKey: + awsSecretAccessKey: + externalSecrets: + # -- (bool) Whether to use External Secrets for aws config. + enabled: false + # -- (String) Name of Secrets Manager secret. + externalSecretAwsCreds: # -- (bool) Whether the deployment is for development purposes. dev: true - # -- (map) Postgres database configuration. + postgres: # -- (bool) Whether the database should be created. dbCreate: true + # -- (string) Name of external secret. Disabled if empty + externalSecret: "" # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres master: # -- (string) hostname of postgres server @@ -26,109 +36,215 @@ global: password: # -- (string) Port for Postgres. port: "5432" + # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. + environment: default + # -- (string) Hostname for the deployment. + hostname: localhost + # -- (string) ARN of the reverse proxy certificate. + revproxyArn: arn:aws:acm:us-east-1:123456:certificate + # -- (string) URL of the data dictionary. + dictionaryUrl: https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json + # -- (string) Portal application name. + portalApp: gitops + # -- (string) S3 bucket name for Kubernetes manifest files. + kubeBucket: kube-gen3 + # -- (string) S3 bucket name for log files. + logsBucket: logs-gen3 + # -- (bool) Whether public datasets are enabled. + publicDataSets: true + # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` + tierAccessLevel: libre + # -- (map) Controls network policy settings + netPolicy: + enabled: false + # -- (int) Number of dispatcher jobs. + dispatcherJobNum: "10" + # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. + pdb: false + # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. + minAvialable: 1 + # -- (map) External Secrets settings. + externalSecrets: + # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override any pcdcanalysistools secrets you have deployed. + deploy: false + # -- (string) Will deploy a separate External Secret Store for this service. + separateSecretStore: false + # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + +# -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +autoscaling: {} + +# -- (bool) Whether Metrics are enabled. +metricsEnabled: true + +# -- (map) External Secrets settings. +externalSecrets: + # -- (string) Will override the name of the aws secrets manager secret. Default is "Values.global.environment-.Chart.Name-creds" + dbcreds: + +# -- (map) Postgres database configuration. 
If db does not exist in postgres cluster and dbCreate is set ot true then these databases will be created for you +postgres: + # -- (bool) Whether the database should be restored from s3. Default to global.postgres.dbRestore + dbRestore: false + # -- (bool) Whether the database should be created. Default to global.postgres.dbCreate + dbCreate: + # -- (string) Hostname for postgres server. This is a service override, defaults to global.postgres.host + host: + # -- (string) Database name for postgres. This is a service override, defaults to - + database: + # -- (string) Username for postgres. This is a service override, defaults to - + username: + # -- (string) Port for Postgres. + port: "5432" + # -- (string) Password for Postgres. Will be autogenerated if left empty. + password: + # -- (string) Will create a Database for the individual service to help with developing it. + separate: false + +# -- (map) Postgresql subchart settings if deployed separately option is set to "true". +# Disable persistence by default so we can spin up and down ephemeral environments +postgresql: + primary: + persistence: + # -- (bool) Option to persist the dbs data. + enabled: false + +# Deployment +releaseLabel: production +# -- (map) Annotations to add to the pod +podAnnotations: {"gen3.io/network-ingress": "pcdcanalysistools"} + +# -- (int) Number of replicas for the deployment. replicaCount: 1 -# -- (string) URL for the arborist service -arboristUrl: http://arborist-service -authNamespace: default +# -- (int) Number of old revisions to retain +revisionHistoryLimit: 2 +# -- (map) Rolling update deployment strategy +strategy: + type: RollingUpdate + rollingUpdate: + # -- (int) Number of additional replicas to add during rollout. + maxSurge: 1 + # -- (int) Maximum amount of pods that can be unavailable during the update. + maxUnavailable: 0 + +# -- (bool) Whether Datadog is enabled. +dataDog: + enabled: false + env: dev + +# -- (map) Affinity to use for the deployment. +affinity: + podAntiAffinity: + # -- (map) Option for scheduling to be required or preferred. + preferredDuringSchedulingIgnoredDuringExecution: + # -- (int) Weight value for preferred scheduling. + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + # -- (list) Label key for match expression. + - key: app + # -- (string) Operation type for the match expression. + operator: In + # -- (list) Value for the match expression key. + values: + - pcdcanalysistools + # -- (string) Value for topology key label. + topologyKey: "kubernetes.io/hostname" + +# -- (bool) Automount the default service account token +automountServiceAccountToken: false + +# -- (int) pcdcanalysistools transactions take forever - try to let the complete before termination +terminationGracePeriodSeconds: 50 + +# -- (map) Docker image information. image: + # -- (string) Docker repository. repository: quay.io/pcdc/pcdcanalysistools + # -- (string) Docker pull policy. pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. + # -- (string) Overrides the image tag whose default is the chart appVersion. tag: 1.8.4 -# -- (list) Docker image pull secrets. -imagePullSecrets: [] - -# -- (string) Override the name of the chart. -nameOverride: "" - -# -- (string) Override the full name of the deployment. -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "" - -# -- (map) Annotations to add to the pod -podAnnotations: {} - -# -- (map) Security context to apply to the pod -podSecurityContext: {} - # fsGroup: 2000 - -# -- (map) Security context to apply to the container -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 - - +# Environment Variables +authNamespace: "" +# -- (list) of files to become volumes in the container +volumes: + - name: config-volume + secret: + secretName: "pcdcanalysistools-secret" + - name: pcdcanalysistools-jwt-keys + secret: + secretName: "pcdcanalysistools-jwt-keys" + items: + - key: jwt_private_key.pem + path: jwt_private_key.pem + +# -- (list) Volumes to mount to the container. +volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/PcdcAnalysisTools/wsgi.py" + subPath: "settings.py" + - name: "config-volume" + readOnly: true + mountPath: "PcdcAnalysisTools/bin/settings.py" + subPath: "settings.py" + - name: "config-volume" + readOnly: true + mountPath: "PcdcAnalysisTools/bin/confighelper.py" + subPath: "confighelper.py" + - name: "pcdcanalysistools-jwt-keys" + readOnly: true + mountPath: "/var/www/PcdcAnalysisTools/jwt_private_key.pem" + subPath: "jwt_private_key.pem" + +# -- (map) Resource requests and limits for the containers in the pod resources: # -- (map) The amount of resources that the container requests requests: - # -- (string) The amount of CPU requested - cpu: 0.1 # -- (string) The amount of memory requested memory: 12Mi # -- (map) The maximum amount of resources that the container is allowed to use limits: - # -- (string) The maximum amount of CPU the container can use - cpu: 1.0 # -- (string) The maximum amount of memory the container can use memory: 512Mi -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -volumes: -- name: config-volume - secret: - secretName: "pcdcanalysistools-secret" +# Service and Pod +# -- (map) Kubernetes service information. +service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 +# Secrets +# -- (map) Values for pcdcanalysistools secret. +secrets: + # -- (str) AWS access key ID to access the db restore job S3 bucket. Overrides global key. + awsAccessKeyId: + # -- (str) AWS secret access key ID to access the db restore job S3 bucket. Overrides global key. + awsSecretAccessKey: + +# Values to determine the labels that are used for the deployment, pod, etc. +# -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". +release: "production" +# -- (string) Valid options are "true" or "false". If invalid option is set- the value will default to "false". +criticalService: "true" +# -- (string) Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. 
+partOf: "Core-Service" # -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl -selectorLabels: - - -# ingress: -# enabled: false -# className: "" -# annotations: {} -# # kubernetes.io/ingress.class: nginx -# # kubernetes.io/tls-acme: "true" -# hosts: -# - host: chart-example.local -# paths: -# - path: / -# pathType: ImplementationSpecific -# tls: [] -# # - secretName: chart-example-tls -# # hosts: -# # - chart-example.local \ No newline at end of file +selectorLabels: +# -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl +commonLabels: \ No newline at end of file diff --git a/helm/sheepdog/templates/deployment.yaml b/helm/sheepdog/templates/deployment.yaml index d1b39e4da..3b241de6b 100644 --- a/helm/sheepdog/templates/deployment.yaml +++ b/helm/sheepdog/templates/deployment.yaml @@ -125,10 +125,6 @@ spec: httpGet: path: /_status?timeout=2 port: 80 - # command: ["/bin/bash" ] - # args: - # - "-c" - # - "sleep infinity" env: - name: CONF_HOSTNAME value: {{ .Values.global.hostname }} diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index b9ae6c1e0..f25be583e 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -315,7 +315,7 @@ pcdcanalysistools: enabled: true image: repository: quay.io/pcdc/pcdcanalysistools - tag: "1.10.0" + tag: "1.10.1" peregrine: image: From 407d0d5d4abc013fc757514fc423c0f9d82f49be Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 31 Jul 2025 14:07:24 -0700 Subject: [PATCH 089/126] Refactor Amanuensis Helm chart and add AWS/Crossplane support Major update to the Amanuensis Helm chart: adds AWS and Crossplane integration, external secrets support, new config and job templates, and a Python YAML merge script. Refactors deployment, service account, HPA, and resource settings for improved flexibility and cloud compatibility. Removes obsolete logo and test templates, updates values.yaml for expanded configuration, and bumps chart version to 1.0.0. 
--- helm/amanuensis/.helmignore | 2 +- helm/amanuensis/Chart.yaml | 8 +- helm/amanuensis/scripts/yaml_merge.py | 56 +++++ helm/amanuensis/templates/_helpers.tpl | 36 +++ .../amanuensis-clear-filter-set-cronjob.yaml | 6 +- .../templates/amanuensis-config-public.yaml | 10 + .../templates/amanuensis-db-migrate-job.yaml | 5 +- .../templates/amanuensis-logo-config.yaml | 6 - .../templates/amanuensis-secrets.yaml | 18 +- .../amanuensis-validate-filter-sets-job.yaml | 3 + .../templates/amanuensis-yaml-merge.yaml | 6 + helm/amanuensis/templates/aws-config.yaml | 3 + helm/amanuensis/templates/crossplane.yaml | 65 +++++ helm/amanuensis/templates/deployment.yaml | 51 ++-- .../amanuensis/templates/external-secret.yaml | 28 +++ helm/amanuensis/templates/hpa.yaml | 44 ++-- helm/amanuensis/templates/netpolicy.yaml | 1 + helm/amanuensis/templates/pdb.yaml | 3 + helm/amanuensis/templates/secret-store.yaml | 3 + helm/amanuensis/templates/service.yaml | 4 +- helm/amanuensis/templates/serviceaccount.yaml | 9 +- .../templates/tests/test-connection.yaml | 15 -- helm/amanuensis/values.yaml | 228 ++++++++++++++---- helm/gen3/Chart.yaml | 2 +- 24 files changed, 461 insertions(+), 151 deletions(-) create mode 100644 helm/amanuensis/scripts/yaml_merge.py create mode 100644 helm/amanuensis/templates/amanuensis-config-public.yaml delete mode 100644 helm/amanuensis/templates/amanuensis-logo-config.yaml create mode 100644 helm/amanuensis/templates/amanuensis-yaml-merge.yaml create mode 100644 helm/amanuensis/templates/aws-config.yaml create mode 100644 helm/amanuensis/templates/crossplane.yaml create mode 100644 helm/amanuensis/templates/external-secret.yaml create mode 100644 helm/amanuensis/templates/netpolicy.yaml create mode 100644 helm/amanuensis/templates/pdb.yaml create mode 100644 helm/amanuensis/templates/secret-store.yaml delete mode 100644 helm/amanuensis/templates/tests/test-connection.yaml diff --git a/helm/amanuensis/.helmignore b/helm/amanuensis/.helmignore index 0e8a0eb36..691fa13d6 100644 --- a/helm/amanuensis/.helmignore +++ b/helm/amanuensis/.helmignore @@ -20,4 +20,4 @@ .project .idea/ *.tmproj -.vscode/ +.vscode/ \ No newline at end of file diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index 68fc04d6e..1c89fe987 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: amanuensis -description: A Helm chart for Kubernetes +description: A Helm chart for gen3 Amanuensis # A chart can be either an 'application' or a 'library' chart. # @@ -15,17 +15,17 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.1 +version: 1.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.16.0" +appVersion: "master" dependencies: - name: common - version: 0.1.11 + version: 0.1.20 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/amanuensis/scripts/yaml_merge.py b/helm/amanuensis/scripts/yaml_merge.py new file mode 100644 index 000000000..5da248af7 --- /dev/null +++ b/helm/amanuensis/scripts/yaml_merge.py @@ -0,0 +1,56 @@ +import sys +import yaml + +''' +Helper script to merge arbitraly number of yaml files + +Usage: python yaml_merge.py file1.yaml file2.yaml ... amanuensis-config.yaml + +Example: python yaml_merge.py file1.yaml file2.yaml amanuensis-config.yaml +file1.yaml key(s) will overriden by items in file2.yaml if they exist, + +''' +def merge_yaml_files(file_paths): + merged_data = {} + + for file_path in file_paths: + try: + with open(file_path, 'r') as file: + data = yaml.safe_load(file) + merged_data = merge_dicts(merged_data, data) + except FileNotFoundError as e: + print('WARNING! File not found: {}. Will be ignored!'.format(file_path)) + + return merged_data + +def merge_dicts(dict1, dict2): + if dict2 is not None: #Fix AttributeError + for key, value in dict2.items(): + if key in dict1 and isinstance(dict1[key], dict) and isinstance(value, dict): + dict1[key] = merge_dicts(dict1[key], value) + else: + dict1[key] = value + + return dict1 + +def save_merged_file(merged_data, output_file_path): + with open(output_file_path, 'w') as output_file: + yaml.dump(merged_data, output_file, default_flow_style=False) + +if __name__ == "__main__": + # Check if at least two arguments are provided (including the script name) + if len(sys.argv) < 3: + print("Usage: python yaml_merge.py config-file1.yaml config-file2.yaml ... amanuensis-config.yaml") + sys.exit(1) + + # Extract input file paths and output file path + input_files = sys.argv[1:-1] + output_file = sys.argv[-1] + + # Merge YAML files + merged_data = merge_yaml_files(input_files) + + # Save the merged data to the output file + save_merged_file(merged_data, output_file) + + print(f"Merged Configuration saved to {output_file}") diff --git a/helm/amanuensis/templates/_helpers.tpl b/helm/amanuensis/templates/_helpers.tpl index d576d4089..56ac8a8c4 100644 --- a/helm/amanuensis/templates/_helpers.tpl +++ b/helm/amanuensis/templates/_helpers.tpl @@ -66,3 +66,39 @@ Create the name of the service account to use {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} + +{{/* + Postgres Password lookup +*/}} +{{- define "amanuensis.postgres.password" -}} +{{- $localpass := (lookup "v1" "Secret" "postgres" "postgres-postgresql" ) -}} +{{- if $localpass }} +{{- default (index $localpass.data "postgres-password" | b64dec) }} +{{- else }} +{{- default .Values.postgres.password }} +{{- end }} +{{- end }} + + +{{/* + amanuensis Config Secrets Manager Name +*/}} +{{- define "amanuensis-config" -}} +{{- default "amanuensis-config" .Values.externalSecrets.amanuensisConfig }} +{{- end }} + +{{/* + amanuensis should config job run +*/}} +{{- define "amanuensis.shouldRunJob" -}} +{{- $existingSecretConfig := lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-config") }} +{{- $existingSecretCreds := lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-creds") }} +{{- if and + (and $existingSecretConfig $existingSecretConfig.data (hasKey $existingSecretConfig.data "amanuensis-config.yaml")) + (and $existingSecretCreds $existingSecretCreds.data (hasKey $existingSecretCreds.data "creds.json")) +}} + false +{{- else }} + true +{{- end }} +{{- end }} \ No 
newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml index 6d05e63c9..14e57b7e1 100644 --- a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml +++ b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml @@ -1,3 +1,5 @@ +{{- if .Values.amanuensisJobs.clearFilterSetCronjob }} +--- apiVersion: batch/v1 kind: CronJob metadata: @@ -79,4 +81,6 @@ spec: echo "WARNING: non zero exit code: $?" exit 1 fi - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-config-public.yaml b/helm/amanuensis/templates/amanuensis-config-public.yaml new file mode 100644 index 000000000..36c3672da --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-config-public.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: manifest-amanuensis +data: + amanuensis-config-public.yaml: | + {{- with .Values.amanuensis_CONFIG_PUBLIC }} + {{- toYaml . | nindent 4 }} + {{ end }} + diff --git a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml index abf55a8d3..e3eabbebb 100644 --- a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml +++ b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml @@ -1,3 +1,4 @@ +{{- if .Values.amanuensisJobs.dbMigrateJob }} --- apiVersion: batch/v1 kind: Job @@ -75,4 +76,6 @@ spec: echo "WARNING: non zero exit code: $?" exit 1 fi - restartPolicy: OnFailure \ No newline at end of file + restartPolicy: OnFailure + +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-logo-config.yaml b/helm/amanuensis/templates/amanuensis-logo-config.yaml deleted file mode 100644 index 6131c0fca..000000000 --- a/helm/amanuensis/templates/amanuensis-logo-config.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: amanuensis-logo-config -data: - logo.svg: {{ .Values.logo | default ((.Files.Get "logo/logo.svg")) }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-secrets.yaml b/helm/amanuensis/templates/amanuensis-secrets.yaml index 8d15a44db..0449672a0 100644 --- a/helm/amanuensis/templates/amanuensis-secrets.yaml +++ b/helm/amanuensis/templates/amanuensis-secrets.yaml @@ -1,3 +1,4 @@ +{{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sAmanuensisConfigSecret) }} apiVersion: v1 kind: ServiceAccount metadata: @@ -68,17 +69,8 @@ data: {} {{- end }} --- -{{- define "create-amanuensis-config-job" -}} -{{- $existingSecretConfig := lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-config") }} -{{- $existingSecretCreds := lookup "v1" "Secret" .Release.Namespace (printf "amanuensis-creds") }} -{{- $shouldRunJob := true }} -{{- if and - (and $existingSecretConfig $existingSecretConfig.data (hasKey $existingSecretConfig.data "amanuensis-config.yaml")) - (and $existingSecretCreds $existingSecretCreds.data (hasKey $existingSecretCreds.data "creds.json")) -}} - {{- $shouldRunJob = false }} -{{- end }} -{{- if $shouldRunJob }} +{{- $shouldRunJob := include "amanuensis.shouldRunJob" . 
| trim }} +{{- if eq $shouldRunJob "true" }} apiVersion: batch/v1 kind: Job metadata: @@ -222,7 +214,5 @@ spec: restartPolicy: Never {{- end }} -{{- end }} --- -# Include the job template here -{{- include "create-amanuensis-config-job" . }} \ No newline at end of file +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml index 8508205f8..8dac2b62a 100644 --- a/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml +++ b/helm/amanuensis/templates/amanuensis-validate-filter-sets-job.yaml @@ -1,3 +1,5 @@ +{{- if .Values.amanuensisJobs.validateFilterSetsJob }} +--- apiVersion: batch/v1 kind: Job metadata: @@ -100,3 +102,4 @@ spec: restartPolicy: Never # Optional: add a backoff limit to control retries backoffLimit: 3 +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/amanuensis-yaml-merge.yaml b/helm/amanuensis/templates/amanuensis-yaml-merge.yaml new file mode 100644 index 000000000..27b730044 --- /dev/null +++ b/helm/amanuensis/templates/amanuensis-yaml-merge.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: amanuensis-yaml-merge +data: +{{ (.Files.Glob "scripts/*").AsConfig | indent 2 }} \ No newline at end of file diff --git a/helm/amanuensis/templates/aws-config.yaml b/helm/amanuensis/templates/aws-config.yaml new file mode 100644 index 000000000..a49fb9f1b --- /dev/null +++ b/helm/amanuensis/templates/aws-config.yaml @@ -0,0 +1,3 @@ +{{- if or (.Values.secrets.awsSecretAccessKey) (.Values.global.aws.awsSecretAccessKey) (.Values.global.aws.externalSecrets.enabled) }} +{{ include "common.awsconfig" . }} +{{- end -}} \ No newline at end of file diff --git a/helm/amanuensis/templates/crossplane.yaml b/helm/amanuensis/templates/crossplane.yaml new file mode 100644 index 000000000..4d63843ac --- /dev/null +++ b/helm/amanuensis/templates/crossplane.yaml @@ -0,0 +1,65 @@ +{{- if .Values.global.crossplane.enabled }} +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: Role +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa" +spec: + providerConfigRef: + name: provider-aws + forProvider: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa" + description: "Role for amanuensis service account for {{ .Values.global.environment }}" + path: "/gen3-service/" + assumeRolePolicyDocument: | + { + "Version":"2012-10-17", + "Statement":[ + { "Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole" }, + { + "Sid":"", + "Effect":"Allow", + "Principal":{"Federated":"arn:aws:iam::{{ .Values.global.crossplane.accountId }}:oidc-provider/{{ .Values.global.crossplane.oidcProviderUrl }}"}, + "Action":"sts:AssumeRoleWithWebIdentity", + "Condition":{ + "StringEquals":{ + "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:amanuensis-sa", + "{{ .Values.global.crossplane.oidcProviderUrl }}:aud":"sts.amazonaws.com" + } + } + } + ] + } +--- +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: Policy +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-role-policy" +spec: + providerConfigRef: + name: provider-aws + forProvider: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-role-policy" + document: | + { + "Version":"2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":["sqs:SendMessage"], + 
"Resource":["arn:aws:sqs:{{ .Values.global.aws.region }}:{{ .Values.global.crossplane.accountId }}:{{ .Values.global.environment }}-audit-sqs-queue", "arn:aws:sqs:{{ .Values.global.aws.region }}:{{ .Values.global.crossplane.accountId }}:{{ .Values.global.environment }}-ssjdispatcher-sqs-queue"] + } + ] + } +--- +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: RolePolicyAttachment +metadata: + name: amanuensis-sa-managed-policy-attachment +spec: + providerConfigRef: + name: provider-aws + forProvider: + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa" + policyArnRef: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-role-policy" +{{- end}} \ No newline at end of file diff --git a/helm/amanuensis/templates/deployment.yaml b/helm/amanuensis/templates/deployment.yaml index a47e7b6a3..afb2da5cb 100644 --- a/helm/amanuensis/templates/deployment.yaml +++ b/helm/amanuensis/templates/deployment.yaml @@ -4,9 +4,6 @@ metadata: name: amanuensis-deployment labels: {{- include "amanuensis.labels" . | nindent 4 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 4 }} - {{- end }} spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} @@ -16,64 +13,60 @@ spec: {{- include "amanuensis.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} annotations: + {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} + {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- include "common.grafanaAnnotations" . | nindent 8 }} + {{- end }} labels: + authprovider: "yes" + netnolimit: "yes" + userhelper: "yes" {{- include "amanuensis.selectorLabels" . | nindent 8 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 8 }} - {{- end }} + {{- include "common.extraLabels" . | nindent 8 }} spec: - volumes: - {{- toYaml .Values.volumes | nindent 8 }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "amanuensis.serviceAccountName" . }} + enableServiceLinks: false securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + serviceAccountName: {{ include "amanuensis.serviceAccountName" . 
}} + volumes: + {{- toYaml .Values.volumes | nindent 8 }} containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} + - name: amanuensis image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http - containerPort: {{ .Values.service.port }} + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 protocol: TCP livenessProbe: httpGet: path: /_status - port: 80 - initialDelaySeconds: 30 + port: http + initialDelaySeconds: 60 periodSeconds: 60 - timeoutSeconds: 60 + timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http resources: {{- toYaml .Values.resources | nindent 12 }} command: ["/bin/bash"] args: - "-c" - | - echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml" python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml if [[ -f /amanuensis/dockerrun.bash ]]; then bash /amanuensis/dockerrun.bash; elif [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; else echo 'Error: Neither /amanuensis/dockerrun.bash nor /dockerrun.sh exists.' >&2; exit 1; fi env: - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogEnvVar" . | nindent 12 }} - {{- end }} {{- toYaml .Values.env | nindent 12 }} volumeMounts: {{- toYaml .Values.volumeMounts | nindent 12 }} - - {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/helm/amanuensis/templates/external-secret.yaml b/helm/amanuensis/templates/external-secret.yaml new file mode 100644 index 000000000..9f490e753 --- /dev/null +++ b/helm/amanuensis/templates/external-secret.yaml @@ -0,0 +1,28 @@ +{{ if .Values.global.externalSecrets.deploy }} +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: amanuensis-config +spec: + refreshInterval: 5m + secretStoreRef: + {{- if ne .Values.global.externalSecrets.clusterSecretStoreRef "" }} + name: {{ .Values.global.externalSecrets.clusterSecretStoreRef }} + kind: ClusterSecretStore + {{- else }} + name: {{include "common.SecretStore" .}} + kind: SecretStore + {{- end }} + target: + name: amanuensis-config + creationPolicy: Owner + data: + - secretKey: amanuensis-config.yaml + remoteRef: + #name of secret in secrets manager + key: {{include "amanuensis-config" .}} +--- +{{- if and (.Values.global.externalSecrets.deploy) (not .Values.global.externalSecrets.dbCreate) }} +{{ include "common.externalSecret.db" . }} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/helm/amanuensis/templates/hpa.yaml b/helm/amanuensis/templates/hpa.yaml index 386a3d4c5..b12e046e8 100644 --- a/helm/amanuensis/templates/hpa.yaml +++ b/helm/amanuensis/templates/hpa.yaml @@ -1,32 +1,32 @@ -{{- if .Values.autoscaling.enabled }} +{{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "amanuensis.fullname" . }} + name: amanuensis-deployment labels: {{- include "amanuensis.labels" . | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "amanuensis.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} + name: amanuensis-deployment + minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} + maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} + {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} + {{- end }} + {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + type: Utilization + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/netpolicy.yaml b/helm/amanuensis/templates/netpolicy.yaml new file mode 100644 index 000000000..70a5c3b5d --- /dev/null +++ b/helm/amanuensis/templates/netpolicy.yaml @@ -0,0 +1 @@ +{{ include "common.db_netpolicy" . }} \ No newline at end of file diff --git a/helm/amanuensis/templates/pdb.yaml b/helm/amanuensis/templates/pdb.yaml new file mode 100644 index 000000000..2ef2de13d --- /dev/null +++ b/helm/amanuensis/templates/pdb.yaml @@ -0,0 +1,3 @@ +{{- if and .Values.global.pdb (gt (int .Values.replicaCount) 1) }} +{{ include "common.pod_disruption_budget" . }} +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/secret-store.yaml b/helm/amanuensis/templates/secret-store.yaml new file mode 100644 index 000000000..771c7760d --- /dev/null +++ b/helm/amanuensis/templates/secret-store.yaml @@ -0,0 +1,3 @@ +{{ if .Values.global.externalSecrets.separateSecretStore }} +{{ include "common.secretstore" . }} +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/service.yaml b/helm/amanuensis/templates/service.yaml index 781b31a52..445c6081c 100644 --- a/helm/amanuensis/templates/service.yaml +++ b/helm/amanuensis/templates/service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: "amanuensis-service" + name: amanuensis-service labels: {{- include "amanuensis.labels" . | nindent 4 }} spec: @@ -12,4 +12,4 @@ spec: protocol: TCP name: http selector: - {{- include "amanuensis.selectorLabels" . | nindent 4 }} + {{- include "amanuensis.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/helm/amanuensis/templates/serviceaccount.yaml b/helm/amanuensis/templates/serviceaccount.yaml index 7e6f0a46e..f214414d1 100644 --- a/helm/amanuensis/templates/serviceaccount.yaml +++ b/helm/amanuensis/templates/serviceaccount.yaml @@ -5,8 +5,13 @@ metadata: name: {{ include "amanuensis.serviceAccountName" . 
}} labels: {{- include "amanuensis.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} + {{- if .Values.global.crossplane.enabled }} + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa + {{- else }} + {{ with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} -{{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/tests/test-connection.yaml b/helm/amanuensis/templates/tests/test-connection.yaml deleted file mode 100644 index a54ae8e77..000000000 --- a/helm/amanuensis/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "amanuensis.fullname" . }}-test-connection" - labels: - {{- include "amanuensis.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "amanuensis.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index 5a8bae290..2f4b2c90e 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -1,13 +1,49 @@ -# Default values for amanuensis. +# Default values for audit. # This is a YAML-formatted file. # Declare variables to be passed into your templates. + +# Global configuration global: + # -- (map) External Secrets settings. + externalSecrets: + # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override secrets you have deployed. + deploy: false + # -- (bool) Will create the databases and store the creds in Kubernetes Secrets even if externalSecrets is deployed. Useful if you want to use ExternalSecrets for other secrets besides db secrets. + dbCreate: false + # -- (string) Will deploy a separate External Secret Store for this service. + separateSecretStore: false + # -- (string) Will use a manually deployed clusterSecretStore if defined. + clusterSecretStoreRef: "" + + # -- (map) AWS configuration + aws: + # -- (bool) Set to true if deploying to AWS. Controls ingress annotations. + enabled: false + # -- (string) Credentials for AWS stuff. + awsAccessKeyId: + # -- (string) Credentials for AWS stuff. + awsSecretAccessKey: + # -- (map) Local secret setting if using a pre-exising secret. + useLocalSecret: + # -- (bool) Set to true if you would like to use a secret that is already running on your cluster. + enabled: false + # -- (string) Name of the local secret. + localSecretName: + # -- (string) Namespace of the local secret. + localSecretNamespace: + externalSecrets: + # -- (bool) Whether to use External Secrets for aws config. + enabled: false + # -- (String) Name of Secrets Manager secret. + externalSecretAwsCreds: # -- (bool) Whether the deployment is for development purposes. dev: true - # -- (map) Postgres database configuration. + postgres: # -- (bool) Whether the database should be created. dbCreate: true + # -- (string) Name of external secret. Disabled if empty + externalSecret: "" # -- (map) Master credentials to postgres. 
This is going to be the default postgres server being used for each service, unless each service specifies their own postgres master: # -- (string) hostname of postgres server @@ -32,26 +68,67 @@ global: kubeBucket: kube-gen3 # -- (string) S3 bucket name for log files. logsBucket: logs-gen3 - # -- (bool) Whether to sync data from dbGaP. - syncFromDbgap: false # -- (bool) Whether public datasets are enabled. publicDataSets: true # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` tierAccessLevel: libre # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. - tierAccessLimit: 1000 - # -- (bool) Whether network policies are enabled. + tierAccessLimit: "1000" + # -- (map) Controls network policy settings netPolicy: enabled: false # -- (int) Number of dispatcher jobs. - dispatcherJobNum: 10 - # -- (bool) Whether Datadog is enabled. - ddEnabled: false + dispatcherJobNum: "10" # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: false # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. minAvialable: 1 + # -- (map) Kubernetes configuration + crossplane: + # -- (bool) Set to true if deploying to AWS and want to use crossplane for AWS resources. + enabled: false + # -- (string) The name of the crossplane provider config. + providerConfigName: provider-aws + # -- (string) OIDC provider URL. This is used for authentication of roles/service accounts. + oidcProviderUrl: oidc.eks.us-east-1.amazonaws.com/id/12345678901234567890 + # -- (string) The account ID of the AWS account. + accountId: 123456789012 + s3: + # -- (string) The kms key id for the s3 bucket. + kmsKeyId: + # -- (bool) Whether to use s3 bucket versioning. + versioningEnabled: false + # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + +# -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +autoscaling: {} + +# -- (bool) Whether Metrics are enabled. +metricsEnabled: true +# -- (map) External Secrets settings. +externalSecrets: + # -- (string) Will create the Helm "amanuensis-config" secret even if Secrets Manager is enabled. This is helpful if you are wanting to use External Secrets for some, but not all secrets. + createK8sAmanuensisConfigSecret: false + # -- (string) Will override the name of the aws secrets manager secret. Default is "amanuensis-config" + amanuensisConfig: + # -- (string) Will override the name of the aws secrets manager secret. Default is "Values.global.environment-.Chart.Name-creds" + dbcreds: + +# -- (map) Secret information for Usersync and External Secrets. +secrets: + # -- (str) AWS access key ID. Overrides global key. + awsAccessKeyId: + # -- (str) AWS access key ID. Overrides global key. + awsSecretAccessKey: + +# -- (map) Postgres database configuration. If db does not exist in postgres cluster and dbCreate is set ot true then these databases will be created for you postgres: # (bool) Whether the database should be restored from s3. 
Default to global.postgres.dbRestore dbRestore: false @@ -70,42 +147,57 @@ postgres: # -- (string) Will create a Database for the individual service to help with developing it. separate: false +# -- (map) Postgresql subchart settings if deployed separately option is set to "true". +# Disable persistence by default so we can spin up and down ephemeral environments postgresql: primary: persistence: # -- (bool) Option to persist the dbs data. enabled: false -arboristUrl: - +# -- (int) Number of desired replicas replicaCount: 1 + image: + # -- (string) The Docker image repository for the amanuensis service repository: quay.io/pcdc/amanuensis - pullPolicy: IfNotPresent - tag: "pcdc_dev_2023-09-06T16_36_49-05_00" + # -- (string) When to pull the image. This value should be "Always" to ensure the latest image is used. + pullPolicy: Always + # -- (string) Overrides the image tag whose default is the chart appVersion. + tag: "master" +# -- (list) Docker image pull secrets. imagePullSecrets: [] +# -- (string) Override the name of the chart. nameOverride: "" +# -- (string) Override the full name of the deployment. fullnameOverride: "" +# -- (map) Service account to use or create. serviceAccount: - # Specifies whether a service account should be created + # -- (bool) Specifies whether a service account should be created. create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. + # -- (map) Annotations to add to the service account. + annotations: + # -- (string) The Amazon Resource Name (ARN) of the role to associate with the service account + eks.amazonaws.com/role-arn: # If not set and create is true, a name is generated using the fullname template - name: "" + # -- (string) The name of the service account + name: "amanuensis-sa" +# -- (map) Annotations to add to the pod podAnnotations: {} -podSecurityContext: {} - # fsGroup: 2000 +# -- (map) Security context for the pod +podSecurityContext: + fsGroup: 101 -securityContext: {} +# -- (map) Security context for the containers in the pod +securityContext: + {} # capabilities: # drop: # - ALL @@ -113,37 +205,60 @@ securityContext: {} # runAsNonRoot: true # runAsUser: 1000 +# -- (map) Kubernetes service information. service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + # -- (int) The port number that the service exposes. port: 80 -resources: +# -- (map) Resource requests and limits for the containers in the pod +resources: + # -- (map) The amount of resources that the container requests requests: - # -- (string) The amount of CPU requested - cpu: 0.1 # -- (string) The amount of memory requested - memory: 12Mi + memory: 128Mi # -- (map) The maximum amount of resources that the container is allowed to use limits: - # -- (string) The maximum amount of CPU the container can use - cpu: 1.0 # -- (string) The maximum amount of memory the container can use - memory: 512Mi - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 + memory: 2Gi +# -- (map) Node Selector for the pods nodeSelector: {} +# -- (list) Tolerations for the pods tolerations: [] -affinity: {} +# -- (map) Labels to add to the pod. +labels: + # -- (string) Grants egress from all pods to pods labeled with authrpovider=yes. For network policy selectors. + authprovider: "yes" + # -- (string) Grants egress from pods labeled with netnolimit=yes to any IP address. 
Use explicit proxy and AWS APIs + netnolimit: "yes" + # -- (string) Grants ingress from the revproxy service for pods labeled with public=yes + public: "yes" + # -- (string) Grants ingress from pods in usercode namespaces for gen3 pods labeled with userhelper=yes + userhelper: "yes" -logo: +# -- (map) Affinity to use for the deployment. +affinity: + podAntiAffinity: + # -- (map) Option for scheduling to be required or preferred. + preferredDuringSchedulingIgnoredDuringExecution: + # -- (int) Weight value for preferred scheduling. + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + # -- (list) Label key for match expression. + - key: app + # -- (string) Operation type for the match expression. + operator: In + # -- (list) Value for the match expression key. + values: + - amanuensis + # -- (string) Value for topology key label. + topologyKey: "kubernetes.io/hostname" # -- (list) Environment variables to pass to the container env: @@ -203,47 +318,54 @@ volumeMounts: readOnly: true mountPath: "/var/www/amanuensis/yaml_merge.py" subPath: yaml_merge.py - - name: "amanuensis-volume" - readOnly: true - mountPath: "/var/www/amanuensis/creds.json" - subPath: creds.json + # - name: "amanuensis-volume" + # readOnly: true + # mountPath: "/var/www/amanuensis/creds.json" + # subPath: creds.json - name: "amanuensis-jwt-keys" readOnly: true mountPath: "/var/www/amanuensis/jwt_private_key.pem" subPath: "jwt_private_key.pem" - # Values to determine the labels that are used for the deployment, pod, etc. # -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". release: "production" # -- (string) Valid options are "true" or "false". If invalid option is set- the value will default to "false". criticalService: "true" # -- (string) Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. -partOf: "Core-Service" +partOf: "ProjectRequest" # -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl commonLabels: -# Values to configure datadog if ddEnabled is set to "true". -# -- (bool) If enabled, the Datadog Agent will automatically inject Datadog-specific metadata into your application logs. -datadogLogsInjection: true -# -- (bool) If enabled, the Datadog Agent will collect profiling data for your application using the Continuous Profiler. This data can be used to identify performance bottlenecks and optimize your application. -datadogProfilingEnabled: true -# -- (int) A value between 0 and 1, that represents the percentage of requests that will be traced. For example, a value of 0.5 means that 50% of requests will be traced. 
-datadogTraceSampleRate: 1 +# -- (map) Public configuration settings for Amanuensis app +AMANUENSIS_CONFIG_PUBLIC: {} +# -- (map) AWS credentials for Amanuensis app AWS_CREDENTIALS: + # -- (map) AWS SES settings for Amanuensis app AWS_SES: + # -- (string) Sender email address for Amanuensis app SENDER: "" + # -- (string) Recipient email address for Amanuensis app RECIPIENT: "" + # -- (map) AWS S3 bucket settings for Amanuensis app AWS_REGION: "us-east-1" - aws_access_key_id: "" - aws_secret_access_key: "" + # -- (map) Data delivery S3 bucket settings for Amanuensis app DATA_DELIVERY_S3_BUCKET: + # -- (string) Name of the S3 bucket for data delivery bucket_name: "" + # -- (string) AWS access key ID for accessing the S3 bucket aws_access_key_id: "" + # -- (string) AWS secret access key for accessing the S3 bucket aws_secret_access_key: "" +# -- (string) CSL key for Amanuensis app +CSL_KEY: "" -CSL_KEY: "" \ No newline at end of file +# -- (map) which amanuensis jobs to run +amanuensisJobs: + clearFilterSetCronjob: false + dbMigrateJob: true + validateFilterSetsJob: false diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index 20a98f085..343703e1a 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -116,7 +116,7 @@ dependencies: repository: "file://../pcdcanalysistools" condition: pcdcanalysistools.enabled - name: amanuensis - version: "0.1.1" + version: "1.0.0" repository: "file://../amanuensis" condition: amanuensis.enabled - name: gearbox From 9ea3ef7c9f47f13e60a41a43f13f06ae470af4e3 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 6 Aug 2025 17:13:31 -0700 Subject: [PATCH 090/126] Refactor Helm charts for AWS, Crossplane, and secrets Adds AWS and Crossplane resource templates, refactors secret management to support External Secrets, updates service account and deployment strategies, and improves configuration for gearbox and gearbox-middleware charts. Updates default values, enables portal and middleware by default, and removes legacy secret templates. Also updates dependency versions and enhances autoscaling and resource configuration. 
--- gearbox-default-values.yaml | 49 ++- helm/gearbox-middleware/Chart.yaml | 2 +- .../gearbox-middleware/templates/_helpers.tpl | 5 +- .../templates/aws-config.yaml | 3 + .../templates/crossplane.yaml | 102 ++++++ .../templates/deployment.yaml | 106 +++--- .../templates/external-secret.yaml | 19 ++ .../templates/gearbox-middleware-creds.yaml | 29 ++ .../templates/gearbox-middleware-secret.yaml | 11 - helm/gearbox-middleware/templates/hpa.yaml | 40 +-- helm/gearbox-middleware/templates/pdb.yaml | 3 + .../templates/secret-store.yaml | 3 + .../gearbox-middleware/templates/service.yaml | 4 +- .../templates/serviceaccount.yaml | 7 +- helm/gearbox-middleware/values.yaml | 312 ++++++++++------- helm/gearbox/templates/_helpers.tpl | 3 +- helm/gearbox/templates/aws-config.yaml | 3 + helm/gearbox/templates/crossplane.yaml | 102 ++++++ helm/gearbox/templates/deployment.yaml | 104 +++--- helm/gearbox/templates/external-secret.yaml | 19 ++ helm/gearbox/templates/gearbox-creds.yaml | 31 ++ helm/gearbox/templates/hpa.yaml | 40 +-- helm/gearbox/templates/netpolicy.yaml | 1 + helm/gearbox/templates/pdb.yaml | 3 + helm/gearbox/templates/secret-store.yaml | 3 + helm/gearbox/templates/service.yaml | 2 +- helm/gearbox/templates/serviceaccount.yaml | 8 +- .../templates/tests/test-connection.yaml | 15 - helm/gearbox/values.yaml | 323 +++++++++++------- 29 files changed, 920 insertions(+), 432 deletions(-) create mode 100644 helm/gearbox-middleware/templates/aws-config.yaml create mode 100644 helm/gearbox-middleware/templates/crossplane.yaml create mode 100644 helm/gearbox-middleware/templates/external-secret.yaml create mode 100644 helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml delete mode 100644 helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml create mode 100644 helm/gearbox-middleware/templates/pdb.yaml create mode 100644 helm/gearbox-middleware/templates/secret-store.yaml create mode 100644 helm/gearbox/templates/aws-config.yaml create mode 100644 helm/gearbox/templates/crossplane.yaml create mode 100644 helm/gearbox/templates/external-secret.yaml create mode 100644 helm/gearbox/templates/gearbox-creds.yaml create mode 100644 helm/gearbox/templates/netpolicy.yaml create mode 100644 helm/gearbox/templates/pdb.yaml create mode 100644 helm/gearbox/templates/secret-store.yaml delete mode 100644 helm/gearbox/templates/tests/test-connection.yaml diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 3d129ac02..a936c230e 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -57,7 +57,7 @@ global: arborist: image: repository: quay.io/cdis/arborist - tag: 2024.03 + tag: 2025.07 fence: FENCE_CONFIG: @@ -154,10 +154,10 @@ fence: readOnly: true mountPath: "/fence/keys/key/jwt_private_key.pem" subPath: "jwt_private_key.pem" - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_public_key.pem" - subPath: "jwt_public_key.pem" + # - name: "fence-jwt-keys" + # readOnly: true + # mountPath: "/fence/keys/key/jwt_public_key.pem" + # subPath: "jwt_public_key.pem" - name: "config-volume-public" readOnly: true mountPath: "/var/www/fence/fence-config-public.yaml" @@ -188,9 +188,10 @@ fence: subPath: fence_google_storage_creds_secret.json podSecurityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + {} # securityContext: # allowPrivilegeEscalation: false # runAsNonRoot: true @@ -424,10 +425,11 @@ fence: - sower portal: - enabled: false + enabled: true 
image: - repository: quay.io/pcdc/gearbox_fe - tag: "dev" + repository: gearbox-frontend + tag: "GEAR-488" + pullPolicy: Never resources: requests: cpu: 1.0 @@ -443,7 +445,7 @@ revproxy: enabled: true image: repository: quay.io/cdis/nginx - tag: 2023.09 + tag: 2025.08 gearbox: enabled: true @@ -455,13 +457,28 @@ gearbox: runAsNonRoot: true runAsUser: 1000 runAsGroup: 1000 + volumes: + - name: config-volume + secret: + secretName: "gearbox-g3auto" + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + optional: false gearbox-middleware: - enabled: false + enabled: true image: - repository: quay.io/pcdc/gearbox-middleware - tag: "helm-test" - pullPolicy: Always + repository: gearbox-middleware + tag: "GEAR-488" + pullPolicy: Never + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 ######################################################################################## # DISABLED SERVICES # diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml index 5870e9fd4..8be3b1288 100644 --- a/helm/gearbox-middleware/Chart.yaml +++ b/helm/gearbox-middleware/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.11 + version: 0.1.20 repository: file://../common - name: gearbox version: 0.1.0 diff --git a/helm/gearbox-middleware/templates/_helpers.tpl b/helm/gearbox-middleware/templates/_helpers.tpl index 7704d6b61..80f399786 100644 --- a/helm/gearbox-middleware/templates/_helpers.tpl +++ b/helm/gearbox-middleware/templates/_helpers.tpl @@ -30,7 +30,6 @@ Create chart name and version as used by the chart label. {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* Common labels */}} {{- define "gearbox-middleware.labels" -}} @@ -61,8 +60,8 @@ Create the name of the service account to use */}} {{- define "gearbox-middleware.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} -{{- default (include "gearbox-middleware.fullname" .) .Values.serviceAccount.name }} +{{- default "gearbox-middleware-sa" .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/gearbox-middleware/templates/aws-config.yaml b/helm/gearbox-middleware/templates/aws-config.yaml new file mode 100644 index 000000000..a49fb9f1b --- /dev/null +++ b/helm/gearbox-middleware/templates/aws-config.yaml @@ -0,0 +1,3 @@ +{{- if or (.Values.secrets.awsSecretAccessKey) (.Values.global.aws.awsSecretAccessKey) (.Values.global.aws.externalSecrets.enabled) }} +{{ include "common.awsconfig" . 
}} +{{- end -}} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/crossplane.yaml b/helm/gearbox-middleware/templates/crossplane.yaml new file mode 100644 index 000000000..12a20bffc --- /dev/null +++ b/helm/gearbox-middleware/templates/crossplane.yaml @@ -0,0 +1,102 @@ +{{- if .Values.global.crossplane.enabled }} +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: Role +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" +spec: + providerConfigRef: + name: provider-aws + forProvider: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + description: "Role for gearbox-middleware service account for {{ .Values.global.environment }}" + path: "/gen3-service/" + assumeRolePolicyDocument: | + { + "Version":"2012-10-17", + "Statement":[ + { "Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole" }, + { + "Sid":"", + "Effect":"Allow", + "Principal":{"Federated":"arn:aws:iam::{{ .Values.global.crossplane.accountId }}:oidc-provider/{{ .Values.global.crossplane.oidcProviderUrl }}"}, + "Action":"sts:AssumeRoleWithWebIdentity", + "Condition":{ + "StringEquals":{ + "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:gearbox-middleware-sa", + "{{ .Values.global.crossplane.oidcProviderUrl }}:aud":"sts.amazonaws.com" + } + } + } + ] + } +--- +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: Policy +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-role-policy" +spec: + providerConfigRef: + name: provider-aws + forProvider: + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-role-policy" + document: | + { + "Version":"2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":["s3:List*","s3:Get*"], + "Resource":[ + "arn:aws:s3:::{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-bucket/*", + "arn:aws:s3:::{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-bucket" + ] + }, + { + "Effect":"Allow", + "Action":["s3:PutObject","s3:GetObject","s3:DeleteObject"], + "Resource":"arn:aws:s3:::{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-bucket/*" + } + ] + } + +--- +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: RolePolicyAttachment +metadata: + name: gearbox-middleware-sa-managed-policy-attachment +spec: + providerConfigRef: + name: provider-aws + forProvider: + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + policyArnRef: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-role-policy" +--- +apiVersion: s3.aws.crossplane.io/v1beta1 +kind: Bucket +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-bucket" +spec: + providerConfigRef: + name: provider-aws + forProvider: + bucketName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-bucket" + acl: private + forceDestroy: false + locationConstraint: {{ .Values.global.aws.region }} + tags: + Organization: gen3 + description: Created by crossplane + versioningConfiguration: + {{- if .Values.global.crossplane.s3.versioningEnabled }} + status: "Enabled" + {{- end }} + serverSideEncryptionConfiguration: + rules: + - applyServerSideEncryptionByDefault: + sseAlgorithm: aws:kms + 
{{- if .Values.global.crossplane.s3.kmsKeyId }} + kmsMasterKeyID: {{ .Values.global.crossplane.s3.kmsKeyId }} + {{- end }} +{{- end}} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml index eca422436..4804fe430 100644 --- a/helm/gearbox-middleware/templates/deployment.yaml +++ b/helm/gearbox-middleware/templates/deployment.yaml @@ -4,9 +4,6 @@ metadata: name: gearbox-middleware-deployment labels: {{- include "gearbox-middleware.labels" . | nindent 4 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 4 }} - {{- end }} spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} @@ -14,71 +11,88 @@ spec: selector: matchLabels: {{- include "gearbox-middleware.selectorLabels" . | nindent 6 }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: + {{- toYaml .Values.strategy | nindent 8 }} template: metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} labels: + public: "yes" + s3: "yes" + userhelper: "yes" {{- include "gearbox-middleware.selectorLabels" . | nindent 8 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 8 }} + {{- include "common.extraLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/gearbox-middleware-creds.yaml") . | sha256sum }} + {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- include "common.grafanaAnnotations" . | nindent 8 }} {{- end }} spec: - volumes: - {{- toYaml .Values.volumes | nindent 8 }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: + serviceAccountName: {{ include "gearbox-middleware.serviceAccountName" . }} + {{- with .Values.affinity }} + affinity: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "gearbox-middleware.serviceAccountName" . 
}} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken}} + volumes: + {{- toYaml .Values.volumes | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds}} containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} + - name: gearbox-middleware image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + {{- toYaml .Values.env | nindent 12 }} + {{- if and .Values.gearboxMiddlewareG3auto.awsaccesskey .Values.gearboxMiddlewareG3auto.awssecretkey }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: gearbox-middleware-g3auto + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: gearbox-middleware-g3auto + key: aws_secret_access_key + {{- else if or (.Values.secrets.awsSecretAccessKey) (.Values.global.aws.awsSecretAccessKey) (.Values.global.aws.externalSecrets.enabled) (.Values.global.aws.useLocalSecret.enabled) }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + {{- if .Values.global.aws.useLocalSecret.enabled }} + name: {{ .Values.global.aws.useLocalSecret.localSecretName }} + {{- else }} + name: {{.Chart.Name}}-aws-config + {{- end }} + key: access-key + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + {{- if .Values.global.aws.useLocalSecret.enabled }} + name: {{ .Values.global.aws.useLocalSecret.localSecretName }} + {{- else }} + name: {{.Chart.Name}}-aws-config + {{- end }} + key: secret-access-key + {{- end }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} ports: - - name: http - containerPort: {{ .Values.service.port }} - protocol: TCP + - containerPort: 80 livenessProbe: httpGet: path: /_status port: 80 - initialDelaySeconds: 30 + initialDelaySeconds: 10 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - {{- toYaml .Values.volumeMounts | nindent 12 }} - env: - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogEnvVar" . | nindent 12 }} - {{- end }} - {{- toYaml .Values.env | nindent 12 }} + port: 80 initContainers: - name: wait-for-gearbox image: curlimages/curl:latest command: ["/bin/sh","-c"] - args: ["while [ $(curl -sw '%{http_code}' http://gearbox-service/_status -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for gearbox...'; done"] - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} \ No newline at end of file + args: ["while [ $(curl -sw '%{http_code}' http://gearbox-service/_status -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for gearbox...'; done"] \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/external-secret.yaml b/helm/gearbox-middleware/templates/external-secret.yaml new file mode 100644 index 000000000..4896a1df9 --- /dev/null +++ b/helm/gearbox-middleware/templates/external-secret.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.global.externalSecrets.deploy) (not .Values.externalSecrets.createK8sgearboxMiddlewareSecret) }} +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: gearbox-middleware-g3auto +spec: + refreshInterval: 5m + secretStoreRef: + name: {{include "common.SecretStore" .}} + kind: SecretStore + target: + name: gearbox-middleware-g3auto + creationPolicy: Owner + data: + - secretKey: config.json + remoteRef: + #name of secret in secrets manager + key: {{include "gearbox-middleware-g3auto" .}} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml b/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml new file mode 100644 index 000000000..9c07b41e1 --- /dev/null +++ b/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml @@ -0,0 +1,29 @@ +{{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sgearboxMiddlewareSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: gearbox-middleware-g3auto +type: Opaque +stringData: + {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-middleware-g3auto" }} + {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} + base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ index $existingSecret.data "base64Authz.txt" | b64dec | quote }}{{ else }}{{ $randomPass | quote | b64enc }}{{ end }} + gearbox-middleware.env: | + HOSTNAME={{ .Values.global.hostname }} + {{ if and .Values.gearboxMiddlewareG3auto.awsaccesskey .Values.gearboxMiddlewareG3auto.awssecretkey }} + aws_access_key_id={{ .Values.gearboxMiddlewareG3auto.awsaccesskey }} + aws_secret_access_key={{ .Values.gearboxMiddlewareG3auto.awssecretkey }} + {{ end }} + AWS_REGION={{ .Values.gearboxMiddlewareG3auto.awsRegion }} + TESTING={{ .Values.gearboxMiddlewareG3auto.testing }} + DEBUG={{ .Values.gearboxMiddlewareG3auto.debug }} + ALLOWED_ISSUERS={{ .Values.gearboxMiddlewareG3auto.allowedIssuers }} + USER_API={{ .Values.gearboxMiddlewareG3auto.userApi }} + FORCE_ISSUER={{ .Values.gearboxMiddlewareG3auto.forceIssuer }} + PRIVATE_KEY_PATH={{ .Values.gearboxMiddlewareG3auto.gearboxMiddlewarePrivateKeyPath }} +data: + {{- if and .Values.gearboxMiddlewareG3auto.awsaccesskey .Values.gearboxMiddlewareG3auto.awssecretkey }} + aws_access_key_id: {{ .Values.gearboxMiddlewareG3auto.awsaccesskey | b64enc | quote }} + aws_secret_access_key: {{ .Values.gearboxMiddlewareG3auto.awssecretkey | b64enc | quote }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml b/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml deleted file mode 100644 index f986bd807..000000000 --- a/helm/gearbox-middleware/templates/gearbox-middleware-secret.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: gearbox-middleware-g3auto -type: Opaque -stringData: - 
gearbox-middleware.env: | - DEBUG=True - FORCE_ISSUER=True - USER_API="http://fence-service/" - ALLOWED_ISSUERS="http://fence-service/,https://localhost/user" \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/hpa.yaml b/helm/gearbox-middleware/templates/hpa.yaml index 0830fef12..765725d72 100644 --- a/helm/gearbox-middleware/templates/hpa.yaml +++ b/helm/gearbox-middleware/templates/hpa.yaml @@ -1,32 +1,32 @@ -{{- if .Values.autoscaling.enabled }} +{{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "gearbox-middleware.fullname" . }} + name: gearbox-middleware-deployment labels: {{- include "gearbox-middleware.labels" . | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "gearbox-middleware.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} + name: gearbox-middleware-deployment + minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} + maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + type: Utilization {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/pdb.yaml b/helm/gearbox-middleware/templates/pdb.yaml new file mode 100644 index 000000000..2ef2de13d --- /dev/null +++ b/helm/gearbox-middleware/templates/pdb.yaml @@ -0,0 +1,3 @@ +{{- if and .Values.global.pdb (gt (int .Values.replicaCount) 1) }} +{{ include "common.pod_disruption_budget" . }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/secret-store.yaml b/helm/gearbox-middleware/templates/secret-store.yaml new file mode 100644 index 000000000..771c7760d --- /dev/null +++ b/helm/gearbox-middleware/templates/secret-store.yaml @@ -0,0 +1,3 @@ +{{ if .Values.global.externalSecrets.separateSecretStore }} +{{ include "common.secretstore" . 
}} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/service.yaml b/helm/gearbox-middleware/templates/service.yaml index 0a11f55c5..550734a1e 100644 --- a/helm/gearbox-middleware/templates/service.yaml +++ b/helm/gearbox-middleware/templates/service.yaml @@ -8,8 +8,8 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: http + targetPort: 80 protocol: TCP name: http selector: - {{- include "gearbox-middleware.selectorLabels" . | nindent 4 }} \ No newline at end of file + {{- include "gearbox-middleware.selectorLabels" . | nindent 4 }} diff --git a/helm/gearbox-middleware/templates/serviceaccount.yaml b/helm/gearbox-middleware/templates/serviceaccount.yaml index 0d406cf0f..9cd1648eb 100644 --- a/helm/gearbox-middleware/templates/serviceaccount.yaml +++ b/helm/gearbox-middleware/templates/serviceaccount.yaml @@ -5,8 +5,13 @@ metadata: name: {{ include "gearbox-middleware.serviceAccountName" . }} labels: {{- include "gearbox-middleware.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} + {{- if .Values.global.crossplane.enabled }} + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa + {{- else }} + {{ with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} + {{- end }} {{- end }} diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index 6868dc575..6fe418d3e 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -1,162 +1,220 @@ -# Default values for gearbox. +# Default values for gearbox-middleware. # This is a YAML-formatted file. # Declare variables to be passed into your templates. + + +# Global configuration global: - # -- (bool) Whether the deployment is for development purposes. - dev: true - # -- (map) Postgres database configuration. - postgres: - # -- (bool) Whether the database should be created. - dbCreate: true - # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres - master: - # -- (string) hostname of postgres server - host: - # -- (string) username of superuser in postgres. This is used to create or restore databases - username: postgres - # -- (string) password for superuser in postgres. This is used to create or restore databases - password: - # -- (string) Port for Postgres. - port: "5432" + # -- (map) AWS configuration + aws: + # -- (bool) Set to true if deploying to AWS. Controls ingress annotations. + enabled: false + # -- (string) Credentials for AWS stuff. + awsAccessKeyId: + # -- (string) Credentials for AWS stuff. + awsSecretAccessKey: + externalSecrets: + # -- (bool) Whether to use External Secrets for aws config. + enabled: false + # -- (String) Name of Secrets Manager secret. + externalSecretAwsCreds: + # -- (map) Local secret setting if using a pre-exising secret. + useLocalSecret: + # -- (bool) Set to true if you would like to use a secret that is already running on your cluster. + enabled: false + # -- (string) Name of the local secret. + localSecretName: # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. 
environment: default - # -- (string) Hostname for the deployment. - hostname: localhost - # -- (string) ARN of the reverse proxy certificate. - revproxyArn: arn:aws:acm:us-east-1:123456:certificate - # -- (string) URL of the data dictionary. - dictionaryUrl: https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json - # -- (string) Portal application name. - portalApp: gitops - # -- (string) S3 bucket name for Kubernetes manifest files. - kubeBucket: kube-gen3 - # -- (string) S3 bucket name for log files. - logsBucket: logs-gen3 - # -- (bool) Whether to sync data from dbGaP. - syncFromDbgap: false - # -- (bool) Whether public datasets are enabled. - publicDataSets: true - # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` - tierAccessLevel: libre - # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. - tierAccessLimit: 1000 - # -- (bool) Whether network policies are enabled. - netPolicy: - enabled: false - # -- (int) Number of dispatcher jobs. - dispatcherJobNum: 10 - # -- (bool) Whether Datadog is enabled. - ddEnabled: false # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: false # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. minAvialable: 1 - + # -- (map) External Secrets settings. + externalSecrets: + # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override any gearbox-middleware secrets you have deployed. + deploy: false + # -- (string) Will deploy a separate External Secret Store for this service. + separateSecretStore: false + # -- (map) Kubernetes configuration + crossplane: + # -- (bool) Set to true if deploying to AWS and want to use crossplane for AWS resources. + enabled: false + # -- (string) The name of the crossplane provider config. + providerConfigName: provider-aws + # -- (string) OIDC provider URL. This is used for authentication of roles/service accounts. + oidcProviderUrl: oidc.eks.us-east-1.amazonaws.com/id/12345678901234567890 + # -- (string) The account ID of the AWS account. + accountId: 123456789012 + s3: + # -- (string) The kms key id for the s3 bucket. + kmsKeyId: + # -- (bool) Whether to use s3 bucket versioning. + versioningEnabled: false + # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + +# -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +autoscaling: {} + +# -- (bool) Whether Metrics are enabled. +metricsEnabled: true + +# -- (map) External Secrets settings. +externalSecrets: + # -- (string) Will create the Helm "gearbox-middleware-g3auto" secret even if Secrets Manager is enabled. This is helpful if you are wanting to use External Secrets for some, but not all secrets. + createK8sgearboxMiddlewareSecret: false + # -- (string) Will override the name of the aws secrets manager secret. 
Default is "gearbox-middleware-g3auto" + gearboxMiddlewareG3auto: +# -- (map) Secret information for External Secrets. +secrets: + # -- (str) AWS access key ID. Overrides global key. + awsAccessKeyId: + # -- (str) AWS secret access key ID. Overrides global key. + awsSecretAccessKey: + +# -- (int) Number of old revisions to retain +revisionHistoryLimit: 2 + +# -- (int) Number of replicas for the deployment. replicaCount: 1 +# -- (map) Docker image information. image: + # -- (string) Docker repository. repository: quay.io/pcdc/gearbox-middleware - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: test_criterion_patch_Tue__09_Apr_2024_22_32_33_GMT + # -- (string) Docker pull policy. + pullPolicy: Always + # -- (string) Overrides the image tag whose default is the chart appVersion. + tag: "" -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" +# -- (map) Kubernetes service information. +service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 +# -- (map) Service account to use or create. serviceAccount: - # Specifies whether a service account should be created + # -- (bool) Specifies whether a service account should be created. create: true - # Annotations to add to the service account + # -- (map) Annotations to add to the service account. annotations: {} - # The name of the service account to use. + # -- (string) The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 80 - -ingress: - enabled: false - className: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: - requests: - cpu: 0.8 - memory: 1024Mi - limits: - cpu: 2 - memory: 2048Mi - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -env: - +# -- (map) Rolling update deployment strategy +strategy: + type: RollingUpdate + rollingUpdate: + # -- (int) Number of additional replicas to add during rollout. + maxSurge: 1 + # -- (int) Maximum amount of pods that can be unavailable during the update. + maxUnavailable: 0 + +# -- (map) Affinity to use for the deployment. +affinity: + podAntiAffinity: + # -- (map) Option for scheduling to be required or preferred. + preferredDuringSchedulingIgnoredDuringExecution: + # -- (int) Weight value for preferred scheduling. + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + # -- (list) Label key for match expression. + - key: app + # -- (string) Operation type for the match expression. + operator: In + # -- (list) Value for the match expression key. + values: + - gearbox-middleware + # -- (string) Value for topology key label. 
+ topologyKey: "kubernetes.io/hostname" + +# -- (bool) Automount the default service account token +automountServiceAccountToken: false + +# -- (list) Volumes to attach to the container. volumes: + - name: config-volume + secret: + secretName: "gearbox-middleware-g3auto" - name: gearbox-middleware-jwt-keys secret: secretName: "gearbox-middleware-jwt-keys" items: - key: jwt_private_key.pem path: jwt_private_key.pem - - name: config-volume-g3auto - secret: - secretName: "gearbox-middleware-g3auto" - optional: true - # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this - # is only available if a /gearbox directory exists. - - name: config-volume - secret: - secretName: gearbox-middleware-config - optional: true + optional: false +# -- (int) Grace period that applies to the total time it takes for both the PreStop hook to execute and for the Container to stop normally. +terminationGracePeriodSeconds: 50 + +# -- (list) Environment variables to pass to the container +env: + - name: GEN3_DEBUG + value: "False" +# -- (list) Volumes to mount to the container. volumeMounts: - name: "gearbox-middleware-jwt-keys" readOnly: true - mountPath: "/gearbox_middleware/gearbox_middleware/keys/jwt_private_key.pem" + mountPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" subPath: jwt_private_key.pem - - name: config-volume-g3auto - readOnly: true - mountPath: "/gearbox_middleware/.env" - subPath: gearbox-middleware.env - name: config-volume readOnly: true - mountPath: /aggregate_config.json - subPath: aggregate_config.json + mountPath: /gearbox-middleware/.env + subPath: gearbox-middleware.env + +# -- (map) Resource requests and limits for the containers in the pod +resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + +# -- (map) Values for gearbox-middleware secret. If the variable you want to change is not listed here it can be added to +gearboxMiddlewareG3auto: + hostname: "localhost" + # -- (string) AWS access key. + awsaccesskey: "" + # -- (string) AWS secret access key. + awssecretkey: "" + # -- (string) region for AWS. + awsRegion: "us-east-1" + # -- (bool) Whether to set gearbox-middleware backend into testing mode. + testing: True + # -- (bool) Whether to run in debug mode. + debug: False + # -- (string) accepted issuers in fence tokens. + allowedIssuers: "http://fence-service/,https://localhost/user" + # -- (string) url for fence. + userApi: "http://fence-service/" + # -- (string) whether to use the userApi value when validating tokens. + forceIssuer: True + # -- (string) private key path for service to service requests with gearbox-middleware. + gearboxMiddlewarePrivateKeyPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" + + +# Values to determine the labels that are used for the deployment, pod, etc. +# -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". +release: "production" +# -- (string) Valid options are "true" or "false". If invalid option is set- the value will default to "false". +criticalService: "true" +# -- (string) Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. 
+partOf: "Workspace-tab" +# -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl +selectorLabels: +# -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl +commonLabels: diff --git a/helm/gearbox/templates/_helpers.tpl b/helm/gearbox/templates/_helpers.tpl index 00e8f4b2c..cb841b8ca 100644 --- a/helm/gearbox/templates/_helpers.tpl +++ b/helm/gearbox/templates/_helpers.tpl @@ -30,7 +30,6 @@ Create chart name and version as used by the chart label. {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} -{{/* Common labels */}} {{- define "gearbox.labels" -}} @@ -61,7 +60,7 @@ Create the name of the service account to use */}} {{- define "gearbox.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} -{{- default (include "gearbox.fullname" .) .Values.serviceAccount.name }} +{{- default "gearbox-sa" .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} diff --git a/helm/gearbox/templates/aws-config.yaml b/helm/gearbox/templates/aws-config.yaml new file mode 100644 index 000000000..a49fb9f1b --- /dev/null +++ b/helm/gearbox/templates/aws-config.yaml @@ -0,0 +1,3 @@ +{{- if or (.Values.secrets.awsSecretAccessKey) (.Values.global.aws.awsSecretAccessKey) (.Values.global.aws.externalSecrets.enabled) }} +{{ include "common.awsconfig" . }} +{{- end -}} \ No newline at end of file diff --git a/helm/gearbox/templates/crossplane.yaml b/helm/gearbox/templates/crossplane.yaml new file mode 100644 index 000000000..f0bad7c05 --- /dev/null +++ b/helm/gearbox/templates/crossplane.yaml @@ -0,0 +1,102 @@ +{{- if .Values.global.crossplane.enabled }} +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: Role +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" +spec: + providerConfigRef: + name: provider-aws + forProvider: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + description: "Role for gearbox service account for {{ .Values.global.environment }}" + path: "/gen3-service/" + assumeRolePolicyDocument: | + { + "Version":"2012-10-17", + "Statement":[ + { "Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole" }, + { + "Sid":"", + "Effect":"Allow", + "Principal":{"Federated":"arn:aws:iam::{{ .Values.global.crossplane.accountId }}:oidc-provider/{{ .Values.global.crossplane.oidcProviderUrl }}"}, + "Action":"sts:AssumeRoleWithWebIdentity", + "Condition":{ + "StringEquals":{ + "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:gearbox-sa", + "{{ .Values.global.crossplane.oidcProviderUrl }}:aud":"sts.amazonaws.com" + } + } + } + ] + } +--- +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: Policy +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-role-policy" +spec: + providerConfigRef: + name: provider-aws + forProvider: + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-role-policy" + document: | + { + "Version":"2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":["s3:List*","s3:Get*"], + "Resource":[ + "arn:aws:s3:::{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-bucket/*", + "arn:aws:s3:::{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-bucket" + ] + }, + { + "Effect":"Allow", + 
"Action":["s3:PutObject","s3:GetObject","s3:DeleteObject"], + "Resource":"arn:aws:s3:::{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-bucket/*" + } + ] + } + +--- +apiVersion: iam.aws.crossplane.io/v1beta1 +kind: RolePolicyAttachment +metadata: + name: gearbox-sa-managed-policy-attachment +spec: + providerConfigRef: + name: provider-aws + forProvider: + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + policyArnRef: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-role-policy" +--- +apiVersion: s3.aws.crossplane.io/v1beta1 +kind: Bucket +metadata: + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-bucket" +spec: + providerConfigRef: + name: provider-aws + forProvider: + bucketName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-bucket" + acl: private + forceDestroy: false + locationConstraint: {{ .Values.global.aws.region }} + tags: + Organization: gen3 + description: Created by crossplane + versioningConfiguration: + {{- if .Values.global.crossplane.s3.versioningEnabled }} + status: "Enabled" + {{- end }} + serverSideEncryptionConfiguration: + rules: + - applyServerSideEncryptionByDefault: + sseAlgorithm: aws:kms + {{- if .Values.global.crossplane.s3.kmsKeyId }} + kmsMasterKeyID: {{ .Values.global.crossplane.s3.kmsKeyId }} + {{- end }} +{{- end}} \ No newline at end of file diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml index 0f069da84..e171deecb 100644 --- a/helm/gearbox/templates/deployment.yaml +++ b/helm/gearbox/templates/deployment.yaml @@ -4,9 +4,6 @@ metadata: name: gearbox-deployment labels: {{- include "gearbox.labels" . | nindent 4 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 4 }} - {{- end }} spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} @@ -14,57 +11,86 @@ spec: selector: matchLabels: {{- include "gearbox.selectorLabels" . | nindent 6 }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: + {{- toYaml .Values.strategy | nindent 8 }} template: metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} labels: + public: "yes" + s3: "yes" + userhelper: "yes" {{- include "gearbox.selectorLabels" . | nindent 8 }} - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogLabels" . | nindent 8 }} + {{- include "common.extraLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/gearbox-creds.yaml") . | sha256sum }} + {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- include "common.grafanaAnnotations" . | nindent 8 }} {{- end }} spec: - volumes: - {{- toYaml .Values.volumes | nindent 8 }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: + serviceAccountName: {{ include "gearbox.serviceAccountName" . }} + {{- with .Values.affinity }} + affinity: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "gearbox.serviceAccountName" . 
}} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken}} + volumes: + {{- toYaml .Values.volumes | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds}} containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} + - name: gearbox image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + {{- toYaml .Values.env | nindent 12 }} + {{- if and .Values.gearboxG3auto.awsaccesskey .Values.gearboxG3auto.awssecretkey }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: gearbox-g3auto + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: gearbox-g3auto + key: aws_secret_access_key + {{- else if or (.Values.secrets.awsSecretAccessKey) (.Values.global.aws.awsSecretAccessKey) (.Values.global.aws.externalSecrets.enabled) (.Values.global.aws.useLocalSecret.enabled) }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + {{- if .Values.global.aws.useLocalSecret.enabled }} + name: {{ .Values.global.aws.useLocalSecret.localSecretName }} + {{- else }} + name: {{.Chart.Name}}-aws-config + {{- end }} + key: access-key + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + {{- if .Values.global.aws.useLocalSecret.enabled }} + name: {{ .Values.global.aws.useLocalSecret.localSecretName }} + {{- else }} + name: {{.Chart.Name}}-aws-config + {{- end }} + key: secret-access-key + {{- end }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} ports: - - name: http - containerPort: {{ .Values.service.port }} - protocol: TCP + - containerPort: 80 livenessProbe: httpGet: path: /_status port: 80 - initialDelaySeconds: 30 + initialDelaySeconds: 10 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - {{- toYaml .Values.volumeMounts | nindent 12 }} - env: - {{- if .Values.global.ddEnabled }} - {{- include "common.datadogEnvVar" . | nindent 12 }} - {{- end }} - {{- toYaml .Values.env | nindent 12 }} + port: 80 initContainers: - name: gearbox-db-migrate image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" @@ -81,15 +107,3 @@ spec: - | poetry run alembic upgrade head - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/helm/gearbox/templates/external-secret.yaml b/helm/gearbox/templates/external-secret.yaml new file mode 100644 index 000000000..32e868322 --- /dev/null +++ b/helm/gearbox/templates/external-secret.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.global.externalSecrets.deploy) (not .Values.externalSecrets.createK8sgearboxSecret) }} +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: gearbox-g3auto +spec: + refreshInterval: 5m + secretStoreRef: + name: {{include "common.SecretStore" .}} + kind: SecretStore + target: + name: gearbox-g3auto + creationPolicy: Owner + data: + - secretKey: config.json + remoteRef: + #name of secret in secrets manager + key: {{include "gearbox-g3auto" .}} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/templates/gearbox-creds.yaml b/helm/gearbox/templates/gearbox-creds.yaml new file mode 100644 index 000000000..229d159c5 --- /dev/null +++ b/helm/gearbox/templates/gearbox-creds.yaml @@ -0,0 +1,31 @@ +{{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sgearboxSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: gearbox-g3auto +type: Opaque +stringData: + {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-g3auto" }} + {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} + base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ index $existingSecret.data "base64Authz.txt" | b64dec | quote }}{{ else }}{{ $randomPass | quote | b64enc }}{{ end }} + gearbox.env: | + HOSTNAME={{ .Values.global.hostname }} + {{ if and .Values.gearboxG3auto.awsaccesskey .Values.gearboxG3auto.awssecretkey }} + S3_AWS_ACCESS_KEY_ID={{ .Values.gearboxG3auto.awsaccesskey }} + S3_AWS_SECRET_ACCESS_KEY={{ .Values.gearboxG3auto.awssecretkey }} + {{ end }} + AWS_REGION={{ .Values.gearboxG3auto.awsRegion }} + TESTING={{ .Values.gearboxG3auto.testing }} + DEBUG={{ .Values.gearboxG3auto.debug }} + ENABLE_PHI={{ .Values.gearboxG3auto.enablePHI }} + DUMMY_S3={{ .Values.gearboxG3auto.dummyS3 }} + ALLOWED_ISSUERS={{ .Values.gearboxG3auto.allowedIssuers }} + USER_API={{ .Values.gearboxG3auto.userApi }} + FORCE_ISSUER={{ .Values.gearboxG3auto.forceIssuer }} + GEARBOX_MIDDLEWARE_PUBLIC_KEY_PATH={{ .Values.gearboxG3auto.gearboxMiddlewarePublicKeyPath }} +data: + {{- if and .Values.gearboxG3auto.awsaccesskey .Values.gearboxG3auto.awssecretkey }} + aws_access_key_id: {{ .Values.gearboxG3auto.awsaccesskey | b64enc | quote }} + aws_secret_access_key: {{ .Values.gearboxG3auto.awssecretkey | b64enc | quote }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/templates/hpa.yaml b/helm/gearbox/templates/hpa.yaml index b97170cfa..081704e0e 100644 --- a/helm/gearbox/templates/hpa.yaml +++ b/helm/gearbox/templates/hpa.yaml @@ -1,32 +1,32 @@ -{{- if .Values.autoscaling.enabled }} +{{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: {{ include "gearbox.fullname" . }} + name: gearbox-deployment labels: {{- include "gearbox.labels" . | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "gearbox.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} + name: gearbox-deployment + minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} + maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} + type: Utilization {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/templates/netpolicy.yaml b/helm/gearbox/templates/netpolicy.yaml new file mode 100644 index 000000000..70a5c3b5d --- /dev/null +++ b/helm/gearbox/templates/netpolicy.yaml @@ -0,0 +1 @@ +{{ include "common.db_netpolicy" . }} \ No newline at end of file diff --git a/helm/gearbox/templates/pdb.yaml b/helm/gearbox/templates/pdb.yaml new file mode 100644 index 000000000..2ef2de13d --- /dev/null +++ b/helm/gearbox/templates/pdb.yaml @@ -0,0 +1,3 @@ +{{- if and .Values.global.pdb (gt (int .Values.replicaCount) 1) }} +{{ include "common.pod_disruption_budget" . }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/templates/secret-store.yaml b/helm/gearbox/templates/secret-store.yaml new file mode 100644 index 000000000..771c7760d --- /dev/null +++ b/helm/gearbox/templates/secret-store.yaml @@ -0,0 +1,3 @@ +{{ if .Values.global.externalSecrets.separateSecretStore }} +{{ include "common.secretstore" . }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/templates/service.yaml b/helm/gearbox/templates/service.yaml index 92c7bd807..cba3c01ff 100644 --- a/helm/gearbox/templates/service.yaml +++ b/helm/gearbox/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: http + targetPort: 80 protocol: TCP name: http selector: diff --git a/helm/gearbox/templates/serviceaccount.yaml b/helm/gearbox/templates/serviceaccount.yaml index 7dac719ce..4349686c0 100644 --- a/helm/gearbox/templates/serviceaccount.yaml +++ b/helm/gearbox/templates/serviceaccount.yaml @@ -5,9 +5,13 @@ metadata: name: {{ include "gearbox.serviceAccountName" . }} labels: {{- include "gearbox.labels" . 
| nindent 4 }} - {{- with .Values.serviceAccount.annotations }} + {{- if .Values.global.crossplane.enabled }} + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa + {{- else }} + {{ with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} + {{- end }} {{- end }} - diff --git a/helm/gearbox/templates/tests/test-connection.yaml b/helm/gearbox/templates/tests/test-connection.yaml deleted file mode 100644 index d7364bd2c..000000000 --- a/helm/gearbox/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "gearbox.fullname" . }}-test-connection" - labels: - {{- include "gearbox.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "gearbox.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index d0a4d1399..8d2deb602 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -1,13 +1,73 @@ # Default values for gearbox. # This is a YAML-formatted file. # Declare variables to be passed into your templates. + + +# Global configuration global: - # -- (bool) Whether the deployment is for development purposes. - dev: true - # -- (map) Postgres database configuration. + # -- (map) AWS configuration + aws: + # -- (bool) Set to true if deploying to AWS. Controls ingress annotations. + enabled: false + # -- (string) Credentials for AWS stuff. + awsAccessKeyId: + # -- (string) Credentials for AWS stuff. + awsSecretAccessKey: + externalSecrets: + # -- (bool) Whether to use External Secrets for aws config. + enabled: false + # -- (String) Name of Secrets Manager secret. + externalSecretAwsCreds: + # -- (map) Local secret setting if using a pre-exising secret. + useLocalSecret: + # -- (bool) Set to true if you would like to use a secret that is already running on your cluster. + enabled: false + # -- (string) Name of the local secret. + localSecretName: + # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. + environment: default + # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. + pdb: false + # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. + minAvialable: 1 + # -- (map) External Secrets settings. + externalSecrets: + # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override any gearbox secrets you have deployed. + deploy: false + # -- (string) Will deploy a separate External Secret Store for this service. + separateSecretStore: false + # -- (map) Kubernetes configuration + crossplane: + # -- (bool) Set to true if deploying to AWS and want to use crossplane for AWS resources. + enabled: false + # -- (string) The name of the crossplane provider config. + providerConfigName: provider-aws + # -- (string) OIDC provider URL. This is used for authentication of roles/service accounts. + oidcProviderUrl: oidc.eks.us-east-1.amazonaws.com/id/12345678901234567890 + # -- (string) The account ID of the AWS account. 
+ accountId: 123456789012 + s3: + # -- (string) The kms key id for the s3 bucket. + kmsKeyId: + # -- (bool) Whether to use s3 bucket versioning. + versioningEnabled: false + # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + + # -- (map) Controls network policy settings + netPolicy: + enabled: false + postgres: # -- (bool) Whether the database should be created. dbCreate: true + # -- (string) Name of external secret. Disabled if empty + externalSecret: "" # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres master: # -- (string) hostname of postgres server @@ -18,40 +78,8 @@ global: password: # -- (string) Port for Postgres. port: "5432" - # -- (string) Environment name. This should be the same as vpcname if you're doing an AWS deployment. Currently this is being used to share ALB's if you have multiple namespaces. Might be used other places too. - environment: default - # -- (string) Hostname for the deployment. - hostname: localhost - # -- (string) ARN of the reverse proxy certificate. - revproxyArn: arn:aws:acm:us-east-1:123456:certificate - # -- (string) URL of the data dictionary. - dictionaryUrl: https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json - # -- (string) Portal application name. - portalApp: gitops - # -- (string) S3 bucket name for Kubernetes manifest files. - kubeBucket: kube-gen3 - # -- (string) S3 bucket name for log files. - logsBucket: logs-gen3 - # -- (bool) Whether to sync data from dbGaP. - syncFromDbgap: false - # -- (bool) Whether public datasets are enabled. - publicDataSets: true - # -- (string) Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` - tierAccessLevel: libre - # -- (int) Only relevant if tireAccessLevel is set to "regular". Summary charts below this limit will not appear for aggregated data. - tierAccessLimit: 1000 - # -- (bool) Whether network policies are enabled. - netPolicy: - enabled: false - # -- (int) Number of dispatcher jobs. - dispatcherJobNum: 10 - # -- (bool) Whether Datadog is enabled. - ddEnabled: false - # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. - pdb: false - # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. - minAvialable: 1 +# -- (map) Postgres database configuration. If db does not exist in postgres cluster and dbCreate is set ot true then these databases will be created for you postgres: # (bool) Whether the database should be restored from s3. Default to global.postgres.dbRestore dbRestore: false @@ -70,94 +98,116 @@ postgres: # -- (string) Will create a Database for the individual service to help with developing it. separate: false +# -- (map) Postgresql subchart settings if deployed separately option is set to "true". +# Disable persistence by default so we can spin up and down ephemeral environments postgresql: primary: persistence: # -- (bool) Option to persist the dbs data. 
enabled: false -replicaCount: 1 +# -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +autoscaling: {} -image: - repository: quay.io/pcdc/gearbox_be - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: 1.3.0 +# -- (bool) Whether Metrics are enabled. +metricsEnabled: true -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" +# -- (map) External Secrets settings. +externalSecrets: + # -- (string) Will create the Helm "gearbox-g3auto" secret even if Secrets Manager is enabled. This is helpful if you are wanting to use External Secrets for some, but not all secrets. + createK8sgearboxSecret: false + # -- (string) Will override the name of the aws secrets manager secret. Default is "gearbox-g3auto" + gearboxG3auto: +# -- (map) Secret information for External Secrets. +secrets: + # -- (str) AWS access key ID. Overrides global key. + awsAccessKeyId: + # -- (str) AWS secret access key ID. Overrides global key. + awsSecretAccessKey: -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -podAnnotations: {} +# -- (int) Number of old revisions to retain +revisionHistoryLimit: 2 -podSecurityContext: - {} - # fsGroup: 2000 +# -- (int) Number of replicas for the deployment. +replicaCount: 1 -securityContext: - {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 +# -- (map) Docker image information. +image: + # -- (string) Docker repository. + repository: quay.io/cdis/gearbox + # -- (string) Docker pull policy. + pullPolicy: Always + # -- (string) Overrides the image tag whose default is the chart appVersion. + tag: "" +# -- (map) Kubernetes service information. service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + # -- (int) The port number that the service exposes. port: 80 -resources: - requests: - cpu: 0.4 - memory: 512Mi - limits: - cpu: 1 - memory: 2048Mi +# -- (map) Service account to use or create. +serviceAccount: + # -- (bool) Specifies whether a service account should be created. + create: true + # -- (map) Annotations to add to the service account. + annotations: {} + # -- (string) The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 +# -- (map) Rolling update deployment strategy +strategy: + type: RollingUpdate + rollingUpdate: + # -- (int) Number of additional replicas to add during rollout. + maxSurge: 1 + # -- (int) Maximum amount of pods that can be unavailable during the update. + maxUnavailable: 0 -nodeSelector: {} +# -- (map) Affinity to use for the deployment. +affinity: + podAntiAffinity: + # -- (map) Option for scheduling to be required or preferred. + preferredDuringSchedulingIgnoredDuringExecution: + # -- (int) Weight value for preferred scheduling. + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + # -- (list) Label key for match expression. 
+ - key: app + # -- (string) Operation type for the match expression. + operator: In + # -- (list) Value for the match expression key. + values: + - gearbox + # -- (string) Value for topology key label. + topologyKey: "kubernetes.io/hostname" -tolerations: [] +# -- (bool) Automount the default service account token +automountServiceAccountToken: false -affinity: {} +# -- (list) Volumes to attach to the container. +volumes: + - name: config-volume + secret: + secretName: "gearbox-g3auto" + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + optional: false +# -- (int) Grace period that applies to the total time it takes for both the PreStop hook to execute and for the Container to stop normally. +terminationGracePeriodSeconds: 50 +# -- (list) Environment variables to pass to the container env: - name: GEN3_DEBUG value: "False" - - name: GEN3_ES_ENDPOINT - value: http://esproxy-service:9200 - - name: AWS_REGION - value: "us-east-1" - - name: DEBUG - value: "0" - - name: FORCE_ISSUER - value: "True" - - name: USER_API - value: "http://fence-service/" - - name: ALLOWED_ISSUERS - value: "http://fence-service/,https://localhost/user" - - name: DUMMY_S3 - value: "True" - - name: ENABLE_PHI - value: "True" - - name: TESTING - value: "False" - name: DB_DATABASE valueFrom: secretKeyRef: @@ -195,20 +245,61 @@ env: key: dbcreated optional: false -volumes: - - name: gearbox-middleware-jwt-keys - secret: - secretName: "gearbox-middleware-jwt-keys" - items: - - key: jwt_public_key.pem - path: jwt_public_key.pem - optional: true - +# -- (list) Volumes to mount to the container. volumeMounts: - name: "gearbox-middleware-jwt-keys" readOnly: true - mountPath: "/src/src/gearbox/keys/jwt_public_key.pem" + mountPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" subPath: jwt_public_key.pem + - name: config-volume + readOnly: true + mountPath: /gearbox/.env + subPath: gearbox.env + +initVolumeMounts: + - name: config-volume + readOnly: true + mountPath: /gearbox/.env + subPath: gearbox.env + +# -- (map) Resource requests and limits for the containers in the pod +resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + +# -- (map) Values for gearbox secret. If the variable you want to change is not listed here it can be added to +gearboxG3auto: + hostname: "localhost" + # -- (string) AWS access key. + awsaccesskey: "" + # -- (string) AWS secret access key. + awssecretkey: "" + # -- (string) region for AWS. + awsRegion: "us-east-1" + # -- (bool) Whether to set gearbox backend into testing mode. + testing: False + # -- (bool) Whether to run in debug mode. + debug: False + # -- (bool) Whether to allow for phi. + enablePhi: False + # -- (string) use a public dummy S3 bucket for testing presigned urls + dummyS3: True + # -- (string) accepted issuers in fence tokens. + allowedIssuers: "http://fence-service/,https://localhost/user" + # -- (string) url for fence. + userApi: "http://fence-service/" + # -- (string) whether to use the userApi value when validating tokens. + forceIssuer: True + # -- (string) public key path for service to service requests with gearbox. 
+ gearboxMiddlewarePublicKeyPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" + + # Values to determine the labels that are used for the deployment, pod, etc. # -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". @@ -216,16 +307,8 @@ release: "production" # -- (string) Valid options are "true" or "false". If invalid option is set- the value will default to "false". criticalService: "true" # -- (string) Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. -partOf: "Core-Service" +partOf: "Workspace-tab" # -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl -commonLabels: - -# Values to configure datadog if ddEnabled is set to "true". -# -- (bool) If enabled, the Datadog Agent will automatically inject Datadog-specific metadata into your application logs. -datadogLogsInjection: true -# -- (bool) If enabled, the Datadog Agent will collect profiling data for your application using the Continuous Profiler. This data can be used to identify performance bottlenecks and optimize your application. -datadogProfilingEnabled: true -# -- (int) A value between 0 and 1, that represents the percentage of requests that will be traced. For example, a value of 0.5 means that 50% of requests will be traced. -datadogTraceSampleRate: 1 +commonLabels: \ No newline at end of file From feff7b4f3265aa598f4631c662b1c346e6b1899f Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 13 Aug 2025 15:06:58 -0700 Subject: [PATCH 091/126] Update service configs and external references Refactored USER_API and OIDC_ISSUER configuration logic in pcdcanalysistools, peregrine, and sheepdog Helm settings for improved localhost handling. Updated Arborist and revproxy image tags, enabled persistent storage for PostgreSQL and Elasticsearch, and adjusted resource requests in pcdc-default-values.yaml. Modified submitter_id values in external_reference.json for several external references. 
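The shared pattern applied to all three settings.py files below resolves the public hostname once, always derives OIDC_ISSUER from it, and only falls back to the in-cluster fence-service URL for USER_API when the hostname is localhost. A condensed sketch of that logic, for reference (wrapped in a hypothetical helper for illustration; `config` and `conf_data` are the per-service dictionaries already defined in each settings.py):

    from os import environ

    def apply_issuer_config(config, conf_data):
        # Hostname: secret config first, then the CONF_HOSTNAME env var, then localhost.
        hostname = conf_data.get("hostname", environ.get("CONF_HOSTNAME", "localhost"))
        # The issuer is always the public fence URL for this hostname.
        config["OIDC_ISSUER"] = "https://%s/user" % hostname
        if hostname == "localhost":
            # Local/dev deployments reach fence through the in-cluster service.
            config["USER_API"] = "http://fence-service/"
        else:
            # Hosted deployments use the public fence URL for authutils.
            config["USER_API"] = "https://%s/user" % hostname
        # FORCE_ISSUER (already set in these files) makes authutils prefer USER_API
        # over the public issuer URL when acquiring JWT keys.
        config["FORCE_ISSUER"] = True
        return config
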
--- .../pcdcanalysistools-secret/settings.py | 7 +++++- helm/peregrine/peregrine-secret/settings.py | 9 +++++-- helm/sheepdog/sheepdog-secret/settings.py | 7 +++++- pcdc-default-values.yaml | 24 +++++++++++++++---- pcdc_data/external/external_reference.json | 8 +++---- 5 files changed, 43 insertions(+), 12 deletions(-) diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py index 6a3be935d..6c7c384d3 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -53,9 +53,14 @@ def load_json(file_name): fence_database, ) -config["USER_API"] = "https://%s/user" % conf_data.get( +hostname = conf_data.get( "hostname", environ.get("CONF_HOSTNAME", "localhost") ) # for use by authutils +config["OIDC_ISSUER"] = "https://%s/user" % hostname +if hostname == "localhost": + config["USER_API"] = "http://fence-service/" +else: + config["USER_API"] = "https://%s/user" % hostname # for use by authutils # config['USER_API'] = 'http://fence-service/' # option to force authutils to prioritize USER_API setting over the issuer from diff --git a/helm/peregrine/peregrine-secret/settings.py b/helm/peregrine/peregrine-secret/settings.py index 9288629cc..75afb2428 100644 --- a/helm/peregrine/peregrine-secret/settings.py +++ b/helm/peregrine/peregrine-secret/settings.py @@ -64,10 +64,15 @@ def load_json(file_name): "https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json", ) -hostname = environ.get("CONF_HOSTNAME") or conf_data["hostname"] +hostname = conf_data.get( + "hostname", environ.get("CONF_HOSTNAME", "localhost") +) # for use by authutils config["OIDC_ISSUER"] = "https://%s/user" % hostname +if hostname == "localhost": + config["USER_API"] = "http://fence-service/" +else: + config["USER_API"] = "https://%s/user" % hostname # for use by authutils -config["USER_API"] = config["OIDC_ISSUER"] # for use by authutils # use the USER_API URL instead of the public issuer URL to accquire JWT keys config["FORCE_ISSUER"] = True app_init(app) diff --git a/helm/sheepdog/sheepdog-secret/settings.py b/helm/sheepdog/sheepdog-secret/settings.py index 9f7564af3..0700c13f3 100644 --- a/helm/sheepdog/sheepdog-secret/settings.py +++ b/helm/sheepdog/sheepdog-secret/settings.py @@ -55,9 +55,14 @@ def load_json(file_name): fence_database, ) -config["USER_API"] = "https://%s/user" % conf_data.get( +hostname = conf_data.get( "hostname", os.environ.get("CONF_HOSTNAME", "localhost") ) # for use by authutils +config["OIDC_ISSUER"] = "https://%s/user" % hostname +if hostname == "localhost": + config["USER_API"] = "http://fence-service/" +else: + config["USER_API"] = "https://%s/user" % hostname # for use by authutils # use the USER_API URL instead of the public issuer URL to accquire JWT keys config["FORCE_ISSUER"] = True config["DICTIONARY_URL"] = os.environ.get( diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index f25be583e..725f4981f 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -59,7 +59,19 @@ global: arborist: image: repository: quay.io/pcdc/arborist - tag: "2025.01" + tag: "2025.07" + resources: + requests: + memory: "128Mi" + # env: + # # -- (string) The URL of the JSON Web Key Set (JWKS) endpoint for authentication + # # - name: JWKS_ENDPOINT + # # value: "http://fence-service/.well-known/jwks" + # # - name: GODEBUG + # # value: "asyncpreemptoff=1" + # # - name: GOMAXPROCS + # # value: "2" + amanuensis: 
enabled: true @@ -1003,7 +1015,7 @@ portal: revproxy: image: repository: quay.io/cdis/nginx - tag: "2025.07" + tag: "2025.08" sheepdog: dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json @@ -1075,8 +1087,8 @@ wts: postgresql: primary: persistence: - # -- (bool) Option to persist the dbs data. - enabled: false + enabled: true + size: 5Gi elasticsearch: clusterName: gen3-elasticsearch @@ -1087,6 +1099,10 @@ elasticsearch: esConfig: elasticsearch.yml: | # Here we can add elasticsearch config + volumeClaimTemplate: + resources: + requests: + storage: 5Gi resources: requests: diff --git a/pcdc_data/external/external_reference.json b/pcdc_data/external/external_reference.json index e3a0ac116..d743f2984 100644 --- a/pcdc_data/external/external_reference.json +++ b/pcdc_data/external/external_reference.json @@ -9,7 +9,7 @@ "external_subject_url": "https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", "subjects": [ { - "submitter_id": "subject_myogram_Yakala" + "submitter_id": "subject_misparse_arthrodynic" } ], "submitter_id": "external_reference_isomaltose_unmingling", @@ -25,7 +25,7 @@ "external_subject_url": "https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", "subjects": [ { - "submitter_id": "subject_euglobulin_unlacquered" + "submitter_id": "subject_acalycinous_indemnificatory" } ], "submitter_id": "external_reference_homemaking_antivibrator", @@ -41,7 +41,7 @@ "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", "subjects": [ { - "submitter_id": "subject_pubiotomy_nonaction" + "submitter_id": "subject_palebelly_telfairic" } ], "submitter_id": "external_reference_irreverendly_subtrifid", @@ -57,7 +57,7 @@ "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", "subjects": [ { - "submitter_id": "subject_forerehearsed_subelongate" + "submitter_id": "subject_crystallology_Dacrydium" } ], "submitter_id": "external_reference_communicative_syntactics", From 9c4ea78d98241c4aefbedb2db76b0d2f42561075 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 13 Aug 2025 15:09:23 -0700 Subject: [PATCH 092/126] Remove unused Arborist resources and clean up Fence config Deleted the 'resources' section and commented env variables from Arborist configuration. Simplified the 'podSecurityContext' in the Fence configuration for better readability. 
--- pcdc-default-values.yaml | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 725f4981f..e4ac9c657 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -60,18 +60,6 @@ arborist: image: repository: quay.io/pcdc/arborist tag: "2025.07" - resources: - requests: - memory: "128Mi" - # env: - # # -- (string) The URL of the JSON Web Key Set (JWKS) endpoint for authentication - # # - name: JWKS_ENDPOINT - # # value: "http://fence-service/.well-known/jwks" - # # - name: GODEBUG - # # value: "asyncpreemptoff=1" - # # - name: GOMAXPROCS - # # value: "2" - amanuensis: enabled: true @@ -89,8 +77,7 @@ fence: MOCK_GOOGLE_AUTH: true mock_default_user: "test@example.com" #LOGIN_REDIRECT_WHITELIST: ["https://localhost:9443/", "http://localhost:9443/"] - podSecurityContext: - {} + podSecurityContext: {} image: repository: "quay.io/pcdc/fence" tag: "helm-test" From a00fdaedf63a07f25d87a0f9941b2eb5a7de20d6 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 21 Aug 2025 18:42:02 -0700 Subject: [PATCH 093/126] sync cdis changes with pcdc services --- .../amanuensis-clear-filter-set-cronjob.yaml | 2 +- .../templates/amanuensis-db-migrate-job.yaml | 2 +- helm/amanuensis/templates/deployment.yaml | 14 +++++-- helm/amanuensis/values.yaml | 5 +-- .../templates/deployment.yaml | 12 +++++- helm/gearbox-middleware/values.yaml | 32 +++++++-------- helm/gearbox/templates/deployment.yaml | 12 +++++- helm/gearbox/values.yaml | 39 +++++++++---------- .../templates/deployment.yaml | 12 +++++- helm/pcdcanalysistools/values.yaml | 9 ++--- 10 files changed, 82 insertions(+), 57 deletions(-) diff --git a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml index 14e57b7e1..4065028ab 100644 --- a/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml +++ b/helm/amanuensis/templates/amanuensis-clear-filter-set-cronjob.yaml @@ -74,7 +74,7 @@ spec: - "-c" - | echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml" - python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml + python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml /var/www/amanuensis/amanuensis-config.yaml cd /amanuensis clear-old-filter-sets if [[ $? != 0 ]]; then diff --git a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml index e3eabbebb..55e89ee40 100644 --- a/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml +++ b/helm/amanuensis/templates/amanuensis-db-migrate-job.yaml @@ -69,7 +69,7 @@ spec: - "-c" - | # echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml" - # python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml + # python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml /var/www/amanuensis/amanuensis-config.yaml cd /amanuensis fence-create migrate if [[ $? 
!= 0 ]]; then diff --git a/helm/amanuensis/templates/deployment.yaml b/helm/amanuensis/templates/deployment.yaml index afb2da5cb..729674633 100644 --- a/helm/amanuensis/templates/deployment.yaml +++ b/helm/amanuensis/templates/deployment.yaml @@ -17,9 +17,17 @@ spec: {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- $metricsEnabled := .Values.metricsEnabled }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = .Values.global.metricsEnabled }} + {{- end }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = true }} + {{- end }} + + {{- if $metricsEnabled }} {{- include "common.grafanaAnnotations" . | nindent 8 }} - {{- end }} + {{- end }} labels: authprovider: "yes" netnolimit: "yes" @@ -61,7 +69,7 @@ spec: args: - "-c" - | - python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml + python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml /var/www/amanuensis/amanuensis-config.yaml if [[ -f /amanuensis/dockerrun.bash ]]; then bash /amanuensis/dockerrun.bash; elif [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; else echo 'Error: Neither /amanuensis/dockerrun.bash nor /dockerrun.sh exists.' >&2; exit 1; fi env: {{- toYaml .Values.env | nindent 12 }} diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index 2f4b2c90e..f80a2da80 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -110,7 +110,7 @@ global: autoscaling: {} # -- (bool) Whether Metrics are enabled. -metricsEnabled: true +metricsEnabled: # -- (map) External Secrets settings. externalSecrets: @@ -158,7 +158,6 @@ postgresql: # -- (int) Number of desired replicas replicaCount: 1 - image: # -- (string) The Docker image repository for the amanuensis service repository: quay.io/pcdc/amanuensis @@ -342,7 +341,7 @@ commonLabels: # -- (map) Public configuration settings for Amanuensis app AMANUENSIS_CONFIG_PUBLIC: {} -# -- (map) AWS credentials for Amanuensis app +# -- (map) AWS credentials for Amanuensis app AWS_CREDENTIALS: # -- (map) AWS SES settings for Amanuensis app AWS_SES: diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml index 4804fe430..e9c1574f4 100644 --- a/helm/gearbox-middleware/templates/deployment.yaml +++ b/helm/gearbox-middleware/templates/deployment.yaml @@ -24,9 +24,17 @@ spec: {{- include "common.extraLabels" . | nindent 8 }} annotations: checksum/config: {{ include (print $.Template.BasePath "/gearbox-middleware-creds.yaml") . | sha256sum }} - {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- $metricsEnabled := .Values.metricsEnabled }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = .Values.global.metricsEnabled }} + {{- end }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = true }} + {{- end }} + + {{- if $metricsEnabled }} {{- include "common.grafanaAnnotations" . | nindent 8 }} - {{- end }} + {{- end }} spec: serviceAccountName: {{ include "gearbox-middleware.serviceAccountName" . 
}} {{- with .Values.affinity }} diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index 6fe418d3e..866028e95 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -2,7 +2,6 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. - # Global configuration global: # -- (map) AWS configuration @@ -63,7 +62,7 @@ global: autoscaling: {} # -- (bool) Whether Metrics are enabled. -metricsEnabled: true +metricsEnabled: # -- (map) External Secrets settings. externalSecrets: @@ -124,20 +123,20 @@ affinity: podAntiAffinity: # -- (map) Option for scheduling to be required or preferred. preferredDuringSchedulingIgnoredDuringExecution: - # -- (int) Weight value for preferred scheduling. - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - # -- (list) Label key for match expression. - - key: app - # -- (string) Operation type for the match expression. - operator: In - # -- (list) Value for the match expression key. - values: - - gearbox-middleware - # -- (string) Value for topology key label. - topologyKey: "kubernetes.io/hostname" + # -- (int) Weight value for preferred scheduling. + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + # -- (list) Label key for match expression. + - key: app + # -- (string) Operation type for the match expression. + operator: In + # -- (list) Value for the match expression key. + values: + - gearbox-middleware + # -- (string) Value for topology key label. + topologyKey: "kubernetes.io/hostname" # -- (bool) Automount the default service account token automountServiceAccountToken: false @@ -206,7 +205,6 @@ gearboxMiddlewareG3auto: # -- (string) private key path for service to service requests with gearbox-middleware. gearboxMiddlewarePrivateKeyPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" - # Values to determine the labels that are used for the deployment, pod, etc. # -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". release: "production" diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml index e171deecb..b72e53544 100644 --- a/helm/gearbox/templates/deployment.yaml +++ b/helm/gearbox/templates/deployment.yaml @@ -24,9 +24,17 @@ spec: {{- include "common.extraLabels" . | nindent 8 }} annotations: checksum/config: {{ include (print $.Template.BasePath "/gearbox-creds.yaml") . | sha256sum }} - {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- $metricsEnabled := .Values.metricsEnabled }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = .Values.global.metricsEnabled }} + {{- end }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = true }} + {{- end }} + + {{- if $metricsEnabled }} {{- include "common.grafanaAnnotations" . | nindent 8 }} - {{- end }} + {{- end }} spec: serviceAccountName: {{ include "gearbox.serviceAccountName" . }} {{- with .Values.affinity }} diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index 8d2deb602..af7c8353a 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -2,7 +2,6 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
- # Global configuration global: # -- (map) AWS configuration @@ -58,11 +57,11 @@ global: maxReplicas: 100 targetCPUUtilizationPercentage: 80 targetMemoryUtilizationPercentage: 80 - + # -- (map) Controls network policy settings netPolicy: enabled: false - + postgres: # -- (bool) Whether the database should be created. dbCreate: true @@ -110,7 +109,7 @@ postgresql: autoscaling: {} # -- (bool) Whether Metrics are enabled. -metricsEnabled: true +metricsEnabled: # -- (map) External Secrets settings. externalSecrets: @@ -171,20 +170,20 @@ affinity: podAntiAffinity: # -- (map) Option for scheduling to be required or preferred. preferredDuringSchedulingIgnoredDuringExecution: - # -- (int) Weight value for preferred scheduling. - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - # -- (list) Label key for match expression. - - key: app - # -- (string) Operation type for the match expression. - operator: In - # -- (list) Value for the match expression key. - values: - - gearbox - # -- (string) Value for topology key label. - topologyKey: "kubernetes.io/hostname" + # -- (int) Weight value for preferred scheduling. + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + # -- (list) Label key for match expression. + - key: app + # -- (string) Operation type for the match expression. + operator: In + # -- (list) Value for the match expression key. + values: + - gearbox + # -- (string) Value for topology key label. + topologyKey: "kubernetes.io/hostname" # -- (bool) Automount the default service account token automountServiceAccountToken: false @@ -299,8 +298,6 @@ gearboxG3auto: # -- (string) public key path for service to service requests with gearbox. gearboxMiddlewarePublicKeyPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" - - # Values to determine the labels that are used for the deployment, pod, etc. # -- (string) Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". release: "production" @@ -311,4 +308,4 @@ partOf: "Workspace-tab" # -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl -commonLabels: \ No newline at end of file +commonLabels: diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index db06e7753..7dfb913de 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -31,9 +31,17 @@ spec: {{- include "common.extraLabels" . | nindent 8 }} annotations: checksum/config: {{ include (print $.Template.BasePath "/pcdcanalysistools-secret.yaml") . | sha256sum }} - {{- if coalesce .Values.metricsEnabled .Values.global.metricsEnabled true }} + {{- $metricsEnabled := .Values.metricsEnabled }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = .Values.global.metricsEnabled }} + {{- end }} + {{- if eq $metricsEnabled nil }} + {{- $metricsEnabled = true }} + {{- end }} + + {{- if $metricsEnabled }} {{- include "common.grafanaAnnotations" . | nindent 8 }} - {{- end }} + {{- end }} spec: {{- with .Values.affinity }} affinity: diff --git a/helm/pcdcanalysistools/values.yaml b/helm/pcdcanalysistools/values.yaml index 08adb21a0..1d68d15c1 100644 --- a/helm/pcdcanalysistools/values.yaml +++ b/helm/pcdcanalysistools/values.yaml @@ -2,7 +2,6 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
- # Global configuration global: # -- (map) AWS configuration @@ -81,7 +80,7 @@ global: autoscaling: {} # -- (bool) Whether Metrics are enabled. -metricsEnabled: true +metricsEnabled: # -- (map) External Secrets settings. externalSecrets: @@ -119,7 +118,7 @@ postgresql: releaseLabel: production # -- (map) Annotations to add to the pod -podAnnotations: {"gen3.io/network-ingress": "pcdcanalysistools"} +podAnnotations: { "gen3.io/network-ingress": "pcdcanalysistools" } # -- (int) Number of replicas for the deployment. replicaCount: 1 @@ -200,7 +199,7 @@ volumeMounts: - name: "config-volume" readOnly: true mountPath: "PcdcAnalysisTools/bin/settings.py" - subPath: "settings.py" + subPath: "settings.py" - name: "config-volume" readOnly: true mountPath: "PcdcAnalysisTools/bin/confighelper.py" @@ -247,4 +246,4 @@ partOf: "Core-Service" # -- (map) Will completely override the selectorLabels defined in the common chart's _label_setup.tpl selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl -commonLabels: \ No newline at end of file +commonLabels: From 079c84141183fa1f6c4dd36f1afff6dfccce4353 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 22 Aug 2025 13:14:53 -0700 Subject: [PATCH 094/126] Refactor service account naming and update common chart Updated Helm charts for amanuensis, gearbox, and gearbox-middleware to use templated service account names via include functions, improving consistency and flexibility. Also bumped the 'common' chart dependency version from 0.1.20 to 0.1.23 across all affected charts. Adjusted IAM role, policy, and annotation references to use the new naming convention. --- helm/amanuensis/Chart.yaml | 2 +- helm/amanuensis/templates/crossplane.yaml | 14 +++++++------- helm/amanuensis/templates/serviceaccount.yaml | 2 +- helm/gearbox-middleware/Chart.yaml | 2 +- helm/gearbox-middleware/templates/crossplane.yaml | 13 ++++++------- .../templates/serviceaccount.yaml | 4 ++-- helm/gearbox/Chart.yaml | 2 +- helm/gearbox/templates/crossplane.yaml | 13 ++++++------- helm/gearbox/templates/serviceaccount.yaml | 2 +- 9 files changed, 26 insertions(+), 28 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index 1c89fe987..b8b7fe684 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/amanuensis/templates/crossplane.yaml b/helm/amanuensis/templates/crossplane.yaml index 4d63843ac..90ec9507b 100644 --- a/helm/amanuensis/templates/crossplane.yaml +++ b/helm/amanuensis/templates/crossplane.yaml @@ -2,14 +2,13 @@ apiVersion: iam.aws.crossplane.io/v1beta1 kind: Role metadata: - name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "amanuensis.serviceAccountName" . }}" spec: providerConfigRef: name: provider-aws forProvider: - name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "amanuensis.serviceAccountName" . 
}}" description: "Role for amanuensis service account for {{ .Values.global.environment }}" - path: "/gen3-service/" assumeRolePolicyDocument: | { "Version":"2012-10-17", @@ -22,7 +21,7 @@ spec: "Action":"sts:AssumeRoleWithWebIdentity", "Condition":{ "StringEquals":{ - "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:amanuensis-sa", + "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:{{ include "amanuensis.serviceAccountName" . }}", "{{ .Values.global.crossplane.oidcProviderUrl }}:aud":"sts.amazonaws.com" } } @@ -46,7 +45,7 @@ spec: { "Effect":"Allow", "Action":["sqs:SendMessage"], - "Resource":["arn:aws:sqs:{{ .Values.global.aws.region }}:{{ .Values.global.crossplane.accountId }}:{{ .Values.global.environment }}-audit-sqs-queue", "arn:aws:sqs:{{ .Values.global.aws.region }}:{{ .Values.global.crossplane.accountId }}:{{ .Values.global.environment }}-ssjdispatcher-sqs-queue"] + "Resource":["arn:aws:sqs:{{ .Values.global.aws.region }}:{{ .Values.global.crossplane.accountId }}:{{ .Values.global.environment }}-{{ .Release.Namespace }}-audit-sqs-queue", "arn:aws:sqs:{{ .Values.global.aws.region }}:{{ .Values.global.crossplane.accountId }}:{{ .Values.global.environment }}-ssjdispatcher-sqs-queue"] } ] } @@ -54,7 +53,7 @@ spec: apiVersion: iam.aws.crossplane.io/v1beta1 kind: RolePolicyAttachment metadata: - name: amanuensis-sa-managed-policy-attachment + name: "{{ include "amanuensis.serviceAccountName" . }}-{{ .Release.Namespace }}-managed-policy-attachment" spec: providerConfigRef: name: provider-aws @@ -62,4 +61,5 @@ spec: roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa" policyArnRef: name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-role-policy" -{{- end}} \ No newline at end of file +{{- end}} + diff --git a/helm/amanuensis/templates/serviceaccount.yaml b/helm/amanuensis/templates/serviceaccount.yaml index f214414d1..d16786c5c 100644 --- a/helm/amanuensis/templates/serviceaccount.yaml +++ b/helm/amanuensis/templates/serviceaccount.yaml @@ -7,7 +7,7 @@ metadata: {{- include "amanuensis.labels" . | nindent 4 }} {{- if .Values.global.crossplane.enabled }} annotations: - eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-amanuensis-sa + eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "amanuensis.serviceAccountName" . 
}} {{- else }} {{ with .Values.serviceAccount.annotations }} annotations: diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml index 8be3b1288..111b74431 100644 --- a/helm/gearbox-middleware/Chart.yaml +++ b/helm/gearbox-middleware/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: gearbox version: 0.1.0 diff --git a/helm/gearbox-middleware/templates/crossplane.yaml b/helm/gearbox-middleware/templates/crossplane.yaml index 12a20bffc..d8913be95 100644 --- a/helm/gearbox-middleware/templates/crossplane.yaml +++ b/helm/gearbox-middleware/templates/crossplane.yaml @@ -2,14 +2,13 @@ apiVersion: iam.aws.crossplane.io/v1beta1 kind: Role metadata: - name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox-middleware.serviceAccountName" . }}" spec: providerConfigRef: name: provider-aws forProvider: - name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox-middleware.serviceAccountName" . }}" description: "Role for gearbox-middleware service account for {{ .Values.global.environment }}" - path: "/gen3-service/" assumeRolePolicyDocument: | { "Version":"2012-10-17", @@ -22,7 +21,7 @@ spec: "Action":"sts:AssumeRoleWithWebIdentity", "Condition":{ "StringEquals":{ - "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:gearbox-middleware-sa", + "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:{{ include "gearbox-middleware.serviceAccountName" . }}", "{{ .Values.global.crossplane.oidcProviderUrl }}:aud":"sts.amazonaws.com" } } @@ -38,7 +37,7 @@ spec: providerConfigRef: name: provider-aws forProvider: - roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox-middleware.serviceAccountName" . }}" name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-role-policy" document: | { @@ -64,12 +63,12 @@ spec: apiVersion: iam.aws.crossplane.io/v1beta1 kind: RolePolicyAttachment metadata: - name: gearbox-middleware-sa-managed-policy-attachment + name: "{{ include "gearbox-middleware.serviceAccountName" . }}-{{ .Release.Namespace }}-managed-policy-attachment" spec: providerConfigRef: name: provider-aws forProvider: - roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa" + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox-middleware.serviceAccountName" . }}" policyArnRef: name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-role-policy" --- diff --git a/helm/gearbox-middleware/templates/serviceaccount.yaml b/helm/gearbox-middleware/templates/serviceaccount.yaml index 9cd1648eb..72ba1cf7c 100644 --- a/helm/gearbox-middleware/templates/serviceaccount.yaml +++ b/helm/gearbox-middleware/templates/serviceaccount.yaml @@ -7,11 +7,11 @@ metadata: {{- include "gearbox-middleware.labels" . 
| nindent 4 }} {{- if .Values.global.crossplane.enabled }} annotations: - eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-middleware-sa + eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox-middleware.serviceAccountName" . }} {{- else }} {{ with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml index f4e1af41b..32d287fa0 100644 --- a/helm/gearbox/Chart.yaml +++ b/helm/gearbox/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/gearbox/templates/crossplane.yaml b/helm/gearbox/templates/crossplane.yaml index f0bad7c05..97fbcec4b 100644 --- a/helm/gearbox/templates/crossplane.yaml +++ b/helm/gearbox/templates/crossplane.yaml @@ -2,14 +2,13 @@ apiVersion: iam.aws.crossplane.io/v1beta1 kind: Role metadata: - name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox.serviceAccountName" . }}" spec: providerConfigRef: name: provider-aws forProvider: - name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox.serviceAccountName" . }}" description: "Role for gearbox service account for {{ .Values.global.environment }}" - path: "/gen3-service/" assumeRolePolicyDocument: | { "Version":"2012-10-17", @@ -22,7 +21,7 @@ spec: "Action":"sts:AssumeRoleWithWebIdentity", "Condition":{ "StringEquals":{ - "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:gearbox-sa", + "{{ .Values.global.crossplane.oidcProviderUrl }}:sub":"system:serviceaccount:{{ .Release.Namespace }}:{{ include "gearbox.serviceAccountName" . }}", "{{ .Values.global.crossplane.oidcProviderUrl }}:aud":"sts.amazonaws.com" } } @@ -38,7 +37,7 @@ spec: providerConfigRef: name: provider-aws forProvider: - roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox.serviceAccountName" . }}" name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-role-policy" document: | { @@ -64,12 +63,12 @@ spec: apiVersion: iam.aws.crossplane.io/v1beta1 kind: RolePolicyAttachment metadata: - name: gearbox-sa-managed-policy-attachment + name: "{{ include "gearbox.serviceAccountName" . }}-{{ .Release.Namespace }}-managed-policy-attachment" spec: providerConfigRef: name: provider-aws forProvider: - roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa" + roleName: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox.serviceAccountName" . 
}}" policyArnRef: name: "{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-role-policy" --- diff --git a/helm/gearbox/templates/serviceaccount.yaml b/helm/gearbox/templates/serviceaccount.yaml index 4349686c0..cb90113b6 100644 --- a/helm/gearbox/templates/serviceaccount.yaml +++ b/helm/gearbox/templates/serviceaccount.yaml @@ -7,7 +7,7 @@ metadata: {{- include "gearbox.labels" . | nindent 4 }} {{- if .Values.global.crossplane.enabled }} annotations: - eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-gearbox-sa + eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.global.crossplane.accountId }}:role/{{ .Values.global.environment }}-{{ .Release.Namespace }}-{{ include "gearbox.serviceAccountName" . }} {{- else }} {{ with .Values.serviceAccount.annotations }} annotations: From 3bd3f51c8ab2f504ef24e5c375acf603b76a3c1e Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 25 Aug 2025 18:47:48 -0400 Subject: [PATCH 095/126] Resolving conflicts in gearbox yaml file for helm build --- gearbox-default-values.yaml | 48 ++++++++++++++----------------------- 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index a936c230e..2c256d045 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -57,7 +57,7 @@ global: arborist: image: repository: quay.io/cdis/arborist - tag: 2025.07 + tag: 2025.07 fence: FENCE_CONFIG: @@ -427,9 +427,9 @@ fence: portal: enabled: true image: - repository: gearbox-frontend - tag: "GEAR-488" - pullPolicy: Never + repository: quay.io/pcdc/gearbox_fe + tag: "dev" + pullPolicy: Always resources: requests: cpu: 1.0 @@ -445,40 +445,28 @@ revproxy: enabled: true image: repository: quay.io/cdis/nginx - tag: 2025.08 + tag: 2023.09 gearbox: enabled: true image: - repository: gearbox-be - tag: "GEAR-488" - pullPolicy: Never - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - volumes: - - name: config-volume - secret: - secretName: "gearbox-g3auto" - - name: gearbox-middleware-jwt-keys - secret: - secretName: "gearbox-middleware-jwt-keys" - items: - - key: jwt_public_key.pem - path: jwt_public_key.pem - optional: false + repository: quay.io/pcdc/gearbox_be + tag: "pcdc_dev_2025-08-22T09_51_08-05_00" + pullPolicy: Always + # Note: gearbox = matching + #repository: gearbox-matching + #tag: "PEDS-1379" + #pullPolicy: Never gearbox-middleware: enabled: true image: - repository: gearbox-middleware - tag: "GEAR-488" - pullPolicy: Never - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 + repository: quay.io/pcdc/gearbox-middleware + tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" + pullPolicy: Always + #repository: gearbox-middleware + #tag: "PEDS-1379" + #pullPolicy: Never ######################################################################################## # DISABLED SERVICES # From f22268eb49d15b0e91f35579119f7f6dcfaa4bb4 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Mon, 25 Aug 2025 18:52:52 -0400 Subject: [PATCH 096/126] Cleaned up --- gearbox-default-values.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 2c256d045..5a9920e01 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -453,10 +453,6 @@ gearbox: repository: quay.io/pcdc/gearbox_be tag: "pcdc_dev_2025-08-22T09_51_08-05_00" pullPolicy: Always - # Note: gearbox = matching - 
#repository: gearbox-matching - #tag: "PEDS-1379" - #pullPolicy: Never gearbox-middleware: enabled: true @@ -464,9 +460,6 @@ gearbox-middleware: repository: quay.io/pcdc/gearbox-middleware tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" pullPolicy: Always - #repository: gearbox-middleware - #tag: "PEDS-1379" - #pullPolicy: Never ######################################################################################## # DISABLED SERVICES # From 3d676d1cf99cf56aece4fd54b91aa0e8596b5e92 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Tue, 26 Aug 2025 13:36:57 -0400 Subject: [PATCH 097/126] Gearbox default updates from PR review --- gearbox-default-values.yaml | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 5a9920e01..5fc0e9e93 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -445,7 +445,7 @@ revproxy: enabled: true image: repository: quay.io/cdis/nginx - tag: 2023.09 + tag: 2025.08 gearbox: enabled: true @@ -453,6 +453,21 @@ gearbox: repository: quay.io/pcdc/gearbox_be tag: "pcdc_dev_2025-08-22T09_51_08-05_00" pullPolicy: Always + podSecurityContext: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + volumes: + - name: config-volume + secret: + secretName: "gearbox-g3auto" + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + optional: false gearbox-middleware: enabled: true @@ -460,6 +475,10 @@ gearbox-middleware: repository: quay.io/pcdc/gearbox-middleware tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" pullPolicy: Always + podSecurityContext: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 ######################################################################################## # DISABLED SERVICES # From b2a6dc2bc6cb76b3c335375b14d2b66860f3173c Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 8 Sep 2025 14:15:02 -0700 Subject: [PATCH 098/126] Update release workflow to run on openshift branch Changed the GitHub Actions release workflow trigger from the master branch to the openshift branch. Also made minor whitespace adjustments for consistency. --- .github/workflows/release.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f4391ce5e..8f0c7afce 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -3,7 +3,7 @@ name: Release Charts on: push: branches: - - master + - openshift jobs: release: @@ -20,12 +20,12 @@ jobs: run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - + - name: Install Helm uses: azure/setup-helm@v3 with: version: v3.10.0 - + - name: Add helm repositories run: | helm repo add bitnami https://charts.bitnami.com/bitnami From b1fcf888af5854341a431dc715b366f2599f5ef5 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 8 Sep 2025 14:53:26 -0700 Subject: [PATCH 099/126] Update ct.yaml to use 'openshift' as target branch Changed the target branch in .github/ct.yaml from 'master' to 'openshift' to align with the current development workflow. 
--- .github/ct.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ct.yaml b/.github/ct.yaml index 969d7b0d5..db50feae6 100644 --- a/.github/ct.yaml +++ b/.github/ct.yaml @@ -1,5 +1,5 @@ remote: origin -target-branch: master +target-branch: openshift chart-dirs: - helm chart-repos: @@ -10,4 +10,4 @@ helm-extra-args: --timeout 600s check-version-increment: true debug: false validate-maintainers: false -helm-dependency-extra-args: "--skip-refresh" \ No newline at end of file +helm-dependency-extra-args: "--skip-refresh" From 39720fb8ab1b2cd458a254fd9cb7a156e4405675 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 8 Sep 2025 15:33:04 -0700 Subject: [PATCH 100/126] Update common chart dependency to v0.1.23 Bump the 'common' Helm chart dependency from version 0.1.20 to 0.1.23 across all service charts to ensure consistency and incorporate the latest improvements or fixes from the common chart. --- helm/access-backend/Chart.yaml | 2 +- helm/ambassador/Chart.yaml | 2 +- helm/arborist/Chart.yaml | 2 +- helm/cedar/Chart.yaml | 2 +- helm/cohort-middleware/Chart.yaml | 2 +- helm/dicom-server/Chart.yaml | 2 +- helm/frontend-framework/Chart.yaml | 2 +- helm/gen3-analysis/Chart.yaml | 2 +- helm/gen3-user-data-library/Chart.yaml | 2 +- helm/guppy/Chart.yaml | 2 +- helm/ohif-viewer/Chart.yaml | 2 +- helm/orthanc/Chart.yaml | 2 +- helm/pcdcanalysistools/Chart.yaml | 2 +- helm/peregrine/Chart.yaml | 2 +- helm/requestor/Chart.yaml | 2 +- helm/sheepdog/Chart.yaml | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/helm/access-backend/Chart.yaml b/helm/access-backend/Chart.yaml index 19f417bff..dfdea210a 100644 --- a/helm/access-backend/Chart.yaml +++ b/helm/access-backend/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "1.6.1" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/ambassador/Chart.yaml b/helm/ambassador/Chart.yaml index a6f534ad7..d24552f6f 100644 --- a/helm/ambassador/Chart.yaml +++ b/helm/ambassador/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "1.4.2" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/arborist/Chart.yaml b/helm/arborist/Chart.yaml index 3ef9a1bb6..5053a709d 100644 --- a/helm/arborist/Chart.yaml +++ b/helm/arborist/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/cedar/Chart.yaml b/helm/cedar/Chart.yaml index 43e138f6a..c364a59d9 100644 --- a/helm/cedar/Chart.yaml +++ b/helm/cedar/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/cohort-middleware/Chart.yaml b/helm/cohort-middleware/Chart.yaml index 325d3d0e5..24a7e752a 100644 --- a/helm/cohort-middleware/Chart.yaml +++ b/helm/cohort-middleware/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/dicom-server/Chart.yaml b/helm/dicom-server/Chart.yaml index 93b076e70..eec257deb 100644 --- a/helm/dicom-server/Chart.yaml +++ b/helm/dicom-server/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/frontend-framework/Chart.yaml b/helm/frontend-framework/Chart.yaml index b2ca8939e..09244adf1 100644 
--- a/helm/frontend-framework/Chart.yaml +++ b/helm/frontend-framework/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "develop" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/gen3-analysis/Chart.yaml b/helm/gen3-analysis/Chart.yaml index 19eb571df..a3dc60588 100644 --- a/helm/gen3-analysis/Chart.yaml +++ b/helm/gen3-analysis/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/gen3-user-data-library/Chart.yaml b/helm/gen3-user-data-library/Chart.yaml index 18a7e3f83..bfc09177a 100644 --- a/helm/gen3-user-data-library/Chart.yaml +++ b/helm/gen3-user-data-library/Chart.yaml @@ -24,7 +24,7 @@ version: 0.1.5 appVersion: "main" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/guppy/Chart.yaml b/helm/guppy/Chart.yaml index 00a4346c2..253fe8050 100644 --- a/helm/guppy/Chart.yaml +++ b/helm/guppy/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/ohif-viewer/Chart.yaml b/helm/ohif-viewer/Chart.yaml index 6ea132776..ad6536119 100644 --- a/helm/ohif-viewer/Chart.yaml +++ b/helm/ohif-viewer/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/orthanc/Chart.yaml b/helm/orthanc/Chart.yaml index ec4f4ac2b..2e6453560 100644 --- a/helm/orthanc/Chart.yaml +++ b/helm/orthanc/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 7b7fc61af..b537e83ba 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common diff --git a/helm/peregrine/Chart.yaml b/helm/peregrine/Chart.yaml index 26d6a504c..a134334b9 100644 --- a/helm/peregrine/Chart.yaml +++ b/helm/peregrine/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/requestor/Chart.yaml b/helm/requestor/Chart.yaml index 2bd172287..936a5a644 100644 --- a/helm/requestor/Chart.yaml +++ b/helm/requestor/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/sheepdog/Chart.yaml b/helm/sheepdog/Chart.yaml index 3edb55d31..422f2d990 100644 --- a/helm/sheepdog/Chart.yaml +++ b/helm/sheepdog/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.23 repository: file://../common - name: postgresql version: 11.9.13 From 4e1f885d009f4733b15b4f677d70c93778e3bed3 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 8 Sep 2025 15:35:07 -0700 Subject: [PATCH 101/126] Update common dependency to version 0.1.23 Bumped the 'common' chart dependency from version 0.1.21 to 0.1.23 in Chart.yaml to use the latest features and fixes. 
--- helm/argo-wrapper/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/argo-wrapper/Chart.yaml b/helm/argo-wrapper/Chart.yaml index 41fc6a4c4..78a0ec8b2 100644 --- a/helm/argo-wrapper/Chart.yaml +++ b/helm/argo-wrapper/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.21 + version: 0.1.23 repository: file://../common From ce934324b85d86d32f12ff21304d67a065f9584d Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 11 Sep 2025 15:56:21 -0700 Subject: [PATCH 102/126] Update service account RBAC and namespace templating Corrected namespace templating syntax in ServiceAccount, Role, RoleBinding resources. Refined RBAC rules by splitting resources between core and apps API groups, and removed batch resources from the role. --- helm/gen3/templates/tests/service-account.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/helm/gen3/templates/tests/service-account.yaml b/helm/gen3/templates/tests/service-account.yaml index 95b67cfdf..868c1ac31 100644 --- a/helm/gen3/templates/tests/service-account.yaml +++ b/helm/gen3/templates/tests/service-account.yaml @@ -2,26 +2,26 @@ apiVersion: v1 kind: ServiceAccount metadata: name: kubectl-access - namespace: {{ .Release.Namespace }} + namespace: { { .Release.Namespace } } --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: kubectl-access-role - namespace: {{ .Release.Namespace }} + namespace: { { .Release.Namespace } } rules: - apiGroups: [""] - resources: ["pods", "pods/exec", "configmaps", "deployments"] + resources: ["pods", "pods/exec", "configmaps"] + verbs: ["get", "list", "create"] + - apiGroups: ["apps"] + resources: ["deployments"] verbs: ["get", "list", "create"] - - apiGroups: ["batch"] - resources: ["cronjobs", "jobs"] - verbs: ["get", "list", "create", "delete", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: kubectl-access-binding - namespace: {{ .Release.Namespace }} + namespace: { { .Release.Namespace } } roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -29,4 +29,4 @@ roleRef: subjects: - kind: ServiceAccount name: kubectl-access - namespace: {{ .Release.Namespace }} + namespace: { { .Release.Namespace } } From 85d989ceee3516958957ba2e5b880c378b803eb2 Mon Sep 17 00:00:00 2001 From: pkellyc Date: Thu, 18 Sep 2025 13:57:32 -0500 Subject: [PATCH 103/126] Fail to delete in helm, resolved with this solution --- helm/common/templates/_jwt_key_pairs.tpl | 2 +- helm/gen3/templates/cleanup-helm-hooks-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/helm/common/templates/_jwt_key_pairs.tpl b/helm/common/templates/_jwt_key_pairs.tpl index 89bc22d61..bf9f3c877 100644 --- a/helm/common/templates/_jwt_key_pairs.tpl +++ b/helm/common/templates/_jwt_key_pairs.tpl @@ -53,7 +53,7 @@ spec: serviceAccountName: {{ .Chart.Name }}-jwt-public-key-patch-sa containers: - name: public-key-gen - image: bitnami/kubectl:latest + image: bitnamisecure/kubectl:latest env: - name: PRIVATE_KEY_PEM valueFrom: diff --git a/helm/gen3/templates/cleanup-helm-hooks-job.yaml b/helm/gen3/templates/cleanup-helm-hooks-job.yaml index 476dd6f16..fdf7cdc01 100644 --- a/helm/gen3/templates/cleanup-helm-hooks-job.yaml +++ b/helm/gen3/templates/cleanup-helm-hooks-job.yaml @@ -72,7 +72,7 @@ spec: serviceAccountName: {{ include "gen3.fullname" . 
}}-cleanup containers: - name: cleanup - image: bitnami/kubectl:latest + image: bitnamisecure/kubectl:latest command: - /bin/bash - -c From 40abd69e30dce0b42c80fd967fa8cd24cc587b10 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 18 Sep 2025 12:18:26 -0700 Subject: [PATCH 104/126] Move 'enabled' flag to TABLE_ONE config root Refactored the TABLE_ONE configuration by moving the 'enabled' flag from the nested 'result' dictionary to the root level of TABLE_ONE. This simplifies the configuration structure. --- helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py index 6c7c384d3..7d8d57293 100644 --- a/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py +++ b/helm/pcdcanalysistools/pcdcanalysistools-secret/settings.py @@ -123,10 +123,7 @@ def load_json(file_name): 'field': 'studies.treatment_arm', } ], - - 'result': { - "enabled": True - } + "enabled": True } config['EXTERNAL'] = { From 63209cebf1223039466bbd638172f30f8c16bdd1 Mon Sep 17 00:00:00 2001 From: Jawad Qureshi Date: Wed, 24 Sep 2025 11:10:34 -0500 Subject: [PATCH 105/126] Add securityContext and targetPort configurations --- .secrets.baseline | 6 +- helm/access-backend/Chart.yaml | 2 +- helm/access-backend/README.md | 3 +- helm/access-backend/templates/deployment.yaml | 2 +- helm/access-backend/values.yaml | 1 + helm/ambassador/Chart.yaml | 2 +- helm/ambassador/README.md | 4 +- helm/ambassador/templates/deployment.yaml | 2 +- helm/ambassador/templates/service.yaml | 2 +- helm/ambassador/values.yaml | 1 + helm/arborist/Chart.yaml | 2 +- helm/arborist/README.md | 4 +- helm/arborist/templates/deployment.yaml | 2 +- helm/arborist/values.yaml | 1 + helm/argo-wrapper/Chart.yaml | 2 +- helm/argo-wrapper/README.md | 4 +- helm/argo-wrapper/templates/deployment.yaml | 5 +- helm/argo-wrapper/values.yaml | 1 + helm/audit/Chart.yaml | 2 +- helm/audit/README.md | 4 +- helm/audit/templates/deployment.yaml | 8 +- helm/audit/values.yaml | 1 + helm/cedar/Chart.yaml | 2 +- helm/cedar/README.md | 2 +- helm/cedar/templates/deployment.yaml | 6 +- helm/cohort-middleware/Chart.yaml | 2 +- helm/cohort-middleware/README.md | 3 +- .../templates/deployment.yaml | 3 +- helm/cohort-middleware/templates/service.yaml | 2 +- helm/cohort-middleware/values.yaml | 1 + helm/common/Chart.yaml | 2 +- helm/common/README.md | 2 +- helm/common/templates/_db_setup_job.tpl | 6 + helm/dicom-server/Chart.yaml | 2 +- helm/dicom-server/README.md | 2 +- helm/dicom-server/templates/deployment.yaml | 7 +- helm/etl/Chart.yaml | 2 +- helm/etl/README.md | 5 +- helm/etl/templates/etl-job.yaml | 8 + helm/etl/values.yaml | 16 ++ helm/fence/Chart.yaml | 2 +- helm/fence/README.md | 6 +- helm/fence/templates/fence-deployment.yaml | 6 +- helm/fence/templates/presigned-url-fence.yaml | 4 +- helm/fence/values.yaml | 5 +- helm/gen3-analysis/Chart.yaml | 2 +- helm/gen3-analysis/README.md | 4 +- helm/gen3-analysis/templates/deployment.yaml | 7 +- helm/gen3-analysis/values.yaml | 1 + helm/gen3-user-data-library/Chart.yaml | 2 +- helm/gen3-user-data-library/README.md | 3 +- .../templates/deployment.yaml | 6 +- helm/gen3-user-data-library/values.yaml | 1 + helm/gen3/Chart.yaml | 58 +++--- helm/gen3/README.md | 58 +++--- helm/gen3/templates/nginx-config.yaml | 68 +++++++ .../gen3/templates/tests/service-account.yaml | 32 ---- helm/guppy/Chart.yaml | 2 +- helm/guppy/README.md | 6 +- 
helm/guppy/templates/deployment.yaml | 11 +- helm/guppy/values.yaml | 16 ++ helm/hatchery/Chart.yaml | 2 +- helm/hatchery/README.md | 4 +- helm/hatchery/templates/deployment.yaml | 2 +- helm/hatchery/values.yaml | 1 + helm/indexd/Chart.yaml | 2 +- helm/indexd/README.md | 4 +- helm/indexd/templates/deployment.yaml | 2 +- helm/indexd/templates/pre-install.yaml | 2 + helm/indexd/values.yaml | 1 + helm/manifestservice/Chart.yaml | 2 +- helm/manifestservice/README.md | 6 +- .../manifestservice/templates/deployment.yaml | 9 +- helm/manifestservice/templates/service.yaml | 2 +- helm/manifestservice/values.yaml | 17 ++ helm/metadata/Chart.yaml | 2 +- helm/metadata/README.md | 8 +- helm/metadata/templates/deployment.yaml | 9 +- helm/metadata/templates/secrets.yaml | 16 -- helm/metadata/values.yaml | 3 +- helm/ohif-viewer/Chart.yaml | 2 +- helm/ohif-viewer/README.md | 2 +- helm/ohif-viewer/templates/deployment.yaml | 7 +- helm/orthanc/Chart.yaml | 2 +- helm/orthanc/README.md | 2 +- helm/orthanc/templates/deployment.yaml | 3 +- helm/peregrine/Chart.yaml | 2 +- helm/peregrine/README.md | 4 +- helm/peregrine/templates/deployment.yaml | 2 +- helm/peregrine/values.yaml | 1 + helm/portal/Chart.yaml | 2 +- helm/portal/README.md | 4 +- helm/portal/templates/deployment.yaml | 18 +- helm/portal/templates/job.yaml | 5 +- helm/portal/templates/nginx-conf.yaml | 180 ++++++++++++++++++ helm/portal/templates/service.yaml | 2 +- helm/portal/values.yaml | 1 + helm/requestor/Chart.yaml | 2 +- helm/requestor/README.md | 4 +- helm/requestor/templates/deployment.yaml | 7 +- helm/requestor/values.yaml | 1 + helm/revproxy/Chart.yaml | 2 +- helm/revproxy/README.md | 6 +- helm/revproxy/nginx/nginx.conf | 11 +- helm/revproxy/templates/configMaps.yaml | 2 +- helm/revproxy/templates/deployment.yaml | 11 +- helm/revproxy/templates/service.yaml | 2 +- helm/revproxy/values.yaml | 5 + helm/sheepdog/Chart.yaml | 2 +- helm/sheepdog/README.md | 10 +- helm/sheepdog/templates/deployment.yaml | 9 +- helm/sheepdog/templates/service.yaml | 2 +- helm/sheepdog/values.yaml | 19 +- helm/sower/Chart.yaml | 2 +- helm/sower/README.md | 4 +- helm/sower/templates/deployment.yaml | 6 +- helm/sower/values.yaml | 1 + helm/ssjdispatcher/Chart.yaml | 2 +- helm/ssjdispatcher/README.md | 4 +- helm/ssjdispatcher/templates/deployment.yaml | 6 +- helm/ssjdispatcher/values.yaml | 1 + helm/wts/Chart.yaml | 2 +- helm/wts/README.md | 4 +- helm/wts/templates/deployment.yaml | 6 +- helm/wts/values.yaml | 1 + 125 files changed, 637 insertions(+), 264 deletions(-) create mode 100644 helm/gen3/templates/nginx-config.yaml delete mode 100644 helm/gen3/templates/tests/service-account.yaml create mode 100644 helm/portal/templates/nginx-conf.yaml diff --git a/.secrets.baseline b/.secrets.baseline index 1f43a7c3e..11f68ea1a 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -153,14 +153,14 @@ "filename": "helm/portal/values.yaml", "hashed_secret": "08eeb737b239bdb7362a875b90e22c10b8826b20", "is_verified": false, - "line_number": 506 + "line_number": 507 }, { "type": "Base64 High Entropy String", "filename": "helm/portal/values.yaml", "hashed_secret": "eb9739c6625f06b4ab73035223366dda6262ae77", "is_verified": false, - "line_number": 508 + "line_number": 509 } ], "helm/revproxy/nginx/helpers.js": [ @@ -173,5 +173,5 @@ } ] }, - "generated_at": "2025-07-16T21:27:02Z" + "generated_at": "2025-09-24T16:09:34Z" } diff --git a/helm/access-backend/Chart.yaml b/helm/access-backend/Chart.yaml index a49e9d7e9..652d372a0 100644 --- a/helm/access-backend/Chart.yaml +++ 
b/helm/access-backend/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.9 +version: 0.1.10 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/access-backend/README.md b/helm/access-backend/README.md index c8b3ada16..2bedfcf59 100644 --- a/helm/access-backend/README.md +++ b/helm/access-backend/README.md @@ -1,6 +1,6 @@ # access-backend -![Version: 0.1.9](https://img.shields.io/badge/Version-0.1.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.1](https://img.shields.io/badge/AppVersion-1.6.1-informational?style=flat-square) +![Version: 0.1.10](https://img.shields.io/badge/Version-0.1.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.1](https://img.shields.io/badge/AppVersion-1.6.1-informational?style=flat-square) A Helm chart for Kubernetes @@ -124,6 +124,7 @@ A Helm chart for Kubernetes | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | securityContext | object | `{}` | | | service.port | int | `80` | | +| service.targetPort | int | `80` | | | service.type | string | `"ClusterIP"` | | | serviceAccount.annotations | object | `{}` | | | serviceAccount.automount | bool | `true` | | diff --git a/helm/access-backend/templates/deployment.yaml b/helm/access-backend/templates/deployment.yaml index a0e3cfd9e..cefdb4dec 100644 --- a/helm/access-backend/templates/deployment.yaml +++ b/helm/access-backend/templates/deployment.yaml @@ -69,7 +69,7 @@ spec: port: 80 ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} diff --git a/helm/access-backend/values.yaml b/helm/access-backend/values.yaml index e53974459..efbf4f7c3 100644 --- a/helm/access-backend/values.yaml +++ b/helm/access-backend/values.yaml @@ -266,6 +266,7 @@ service: type: ClusterIP # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports port: 80 + targetPort: 80 # This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: diff --git a/helm/ambassador/Chart.yaml b/helm/ambassador/Chart.yaml index a6f534ad7..2e7db9879 100644 --- a/helm/ambassador/Chart.yaml +++ b/helm/ambassador/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.25 +version: 0.1.26 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/ambassador/README.md b/helm/ambassador/README.md index b98cb2220..df11dac5a 100644 --- a/helm/ambassador/README.md +++ b/helm/ambassador/README.md @@ -1,6 +1,6 @@ # ambassador -![Version: 0.1.25](https://img.shields.io/badge/Version-0.1.25-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.2](https://img.shields.io/badge/AppVersion-1.4.2-informational?style=flat-square) +![Version: 0.1.26](https://img.shields.io/badge/Version-0.1.26-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.2](https://img.shields.io/badge/AppVersion-1.4.2-informational?style=flat-square) A Helm chart for deploying ambassador for gen3 @@ -48,7 +48,7 @@ A Helm chart for deploying ambassador for gen3 | resources.requests.memory | string | `"100Mi"` | The amount of memory requested | | securityContext | map | `{}` | Container-level security context. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":8877,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":8877,"targetPort":8080,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `8877` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":""}` | Service account to use or create. | diff --git a/helm/ambassador/templates/deployment.yaml b/helm/ambassador/templates/deployment.yaml index dae93e31e..78f524628 100644 --- a/helm/ambassador/templates/deployment.yaml +++ b/helm/ambassador/templates/deployment.yaml @@ -68,7 +68,7 @@ spec: value: "true" ports: - name: http - containerPort: 8080 + containerPort: {{ .Values.service.targetPort }} - name: https containerPort: 8443 - name: admin diff --git a/helm/ambassador/templates/service.yaml b/helm/ambassador/templates/service.yaml index 8fc57bfe9..b25331452 100644 --- a/helm/ambassador/templates/service.yaml +++ b/helm/ambassador/templates/service.yaml @@ -22,7 +22,7 @@ metadata: spec: ports: - port: 80 - targetPort: 8080 + targetPort: http name: proxy selector: {{- include "ambassador.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/helm/ambassador/values.yaml b/helm/ambassador/values.yaml index e8ac8ff07..4bce716a4 100644 --- a/helm/ambassador/values.yaml +++ b/helm/ambassador/values.yaml @@ -78,6 +78,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 8877 + targetPort: 8080 # -- (string) Namespace to use for user resources. userNamespace: "jupyter-pods" diff --git a/helm/arborist/Chart.yaml b/helm/arborist/Chart.yaml index 3ef9a1bb6..ee3b52af6 100644 --- a/helm/arborist/Chart.yaml +++ b/helm/arborist/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.24 +version: 0.1.25 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/arborist/README.md b/helm/arborist/README.md index 632f7590d..7fd8a2b4e 100644 --- a/helm/arborist/README.md +++ b/helm/arborist/README.md @@ -1,6 +1,6 @@ # arborist -![Version: 0.1.24](https://img.shields.io/badge/Version-0.1.24-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.25](https://img.shields.io/badge/Version-0.1.25-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 arborist @@ -93,7 +93,7 @@ A Helm chart for gen3 arborist | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | securityContext | map | `{}` | Security context to apply to the container | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":""}` | Service account to use or create. | diff --git a/helm/arborist/templates/deployment.yaml b/helm/arborist/templates/deployment.yaml index a542d5abf..3ad5d93e2 100644 --- a/helm/arborist/templates/deployment.yaml +++ b/helm/arborist/templates/deployment.yaml @@ -54,7 +54,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: diff --git a/helm/arborist/values.yaml b/helm/arborist/values.yaml index 8a624bd49..7d152f975 100644 --- a/helm/arborist/values.yaml +++ b/helm/arborist/values.yaml @@ -185,6 +185,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 80 + targetPort: 80 # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/helm/argo-wrapper/Chart.yaml b/helm/argo-wrapper/Chart.yaml index 41fc6a4c4..bf2f75a2b 100644 --- a/helm/argo-wrapper/Chart.yaml +++ b/helm/argo-wrapper/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.19 +version: 0.1.20 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/argo-wrapper/README.md b/helm/argo-wrapper/README.md index 3672d075a..138d2d43d 100644 --- a/helm/argo-wrapper/README.md +++ b/helm/argo-wrapper/README.md @@ -1,6 +1,6 @@ # argo-wrapper -![Version: 0.1.19](https://img.shields.io/badge/Version-0.1.19-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.20](https://img.shields.io/badge/Version-0.1.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Argo Wrapper Service @@ -58,7 +58,7 @@ A Helm chart for gen3 Argo Wrapper Service | s3Bucket | string | `"argo-artifact-downloadable"` | S3 bucket name for Argo artifacts (allows pre-signed URLs). | | scalingGroups | list | `[{"user1":"workflow1"},{"user2":"workflow2"},{"user3":"workflow3"}]` | The workflow scaling groups to be used by Argo. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":8000,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":8000,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `8000` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | strategy | map | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"}` | Rolling update deployment strategy | diff --git a/helm/argo-wrapper/templates/deployment.yaml b/helm/argo-wrapper/templates/deployment.yaml index 9038c94d4..74fa5f2b5 100644 --- a/helm/argo-wrapper/templates/deployment.yaml +++ b/helm/argo-wrapper/templates/deployment.yaml @@ -56,13 +56,14 @@ spec: livenessProbe: httpGet: path: /test - port: 8000 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - containerPort: 8000 + - containerPort: {{ .Values.service.targetPort }} + name: http protocol: TCP {{- with .Values.volumeMounts }} volumeMounts: diff --git a/helm/argo-wrapper/values.yaml b/helm/argo-wrapper/values.yaml index f57c6dd62..6f152cd75 100644 --- a/helm/argo-wrapper/values.yaml +++ b/helm/argo-wrapper/values.yaml @@ -109,6 +109,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 8000 + targetPort: 80 # -- (map) Configuration for network policies created by this chart. Only relevant if "global.netPolicy.enabled" is set to true netPolicy: diff --git a/helm/audit/Chart.yaml b/helm/audit/Chart.yaml index e3b4c48df..605a1330b 100644 --- a/helm/audit/Chart.yaml +++ b/helm/audit/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.30 +version: 0.1.31 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/audit/README.md b/helm/audit/README.md index 37baeae6b..fc08dabae 100644 --- a/helm/audit/README.md +++ b/helm/audit/README.md @@ -1,6 +1,6 @@ # audit -![Version: 0.1.30](https://img.shields.io/badge/Version-0.1.30-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.31](https://img.shields.io/badge/Version-0.1.31-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for Kubernetes @@ -122,7 +122,7 @@ A Helm chart for Kubernetes | server.sqs.region | string | `"us-east-1"` | SQS queue AWS region. | | server.sqs.url | string | `"http://sqs.com"` | The URL for the SQS queue. | | server.type | string | `"aws_sqs"` | Whether audit should use the api or aws_sqs. | -| service | map | `{"port":80,"type":"ClusterIP"}` | Configuration for the service | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Configuration for the service | | service.port | int | `80` | Port on which the service is exposed | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{"eks.amazonaws.com/role-arn":null},"create":true,"name":"audit-service-sa"}` | Service account to use or create. | diff --git a/helm/audit/templates/deployment.yaml b/helm/audit/templates/deployment.yaml index 75b359cad..8f22451c0 100644 --- a/helm/audit/templates/deployment.yaml +++ b/helm/audit/templates/deployment.yaml @@ -34,6 +34,8 @@ spec: {{- include "common.extraLabels" . | nindent 8 }} spec: serviceAccountName: {{ include "audit.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: - name: config-volume secret: @@ -46,20 +48,20 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} name: http protocol: TCP livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http resources: {{- toYaml .Values.resources | nindent 12 }} env: diff --git a/helm/audit/values.yaml b/helm/audit/values.yaml index 9bd1befcb..566de2e85 100644 --- a/helm/audit/values.yaml +++ b/helm/audit/values.yaml @@ -185,6 +185,7 @@ service: type: ClusterIP # -- (int) Port on which the service is exposed port: 80 + targetPort: 80 # -- (map) Configuration for network policies created by this chart. Only relevant if "global.netPolicy.enabled" is set to true netPolicy: diff --git a/helm/cedar/Chart.yaml b/helm/cedar/Chart.yaml index 43e138f6a..0aea66792 100644 --- a/helm/cedar/Chart.yaml +++ b/helm/cedar/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.12 +version: 0.1.13 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/cedar/README.md b/helm/cedar/README.md index 30d7e7076..f2f97fe6b 100644 --- a/helm/cedar/README.md +++ b/helm/cedar/README.md @@ -1,6 +1,6 @@ # cedar -![Version: 0.1.12](https://img.shields.io/badge/Version-0.1.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.13](https://img.shields.io/badge/Version-0.1.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 cedar wrapper diff --git a/helm/cedar/templates/deployment.yaml b/helm/cedar/templates/deployment.yaml index 44467dbc8..35a0029e5 100644 --- a/helm/cedar/templates/deployment.yaml +++ b/helm/cedar/templates/deployment.yaml @@ -94,19 +94,19 @@ spec: - /src/start.sh ports: - name: http - containerPort: 8000 + containerPort: {{ .Values.service.targetPort }} protocol: TCP readinessProbe: httpGet: path: /_status/ - port: 8000 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 livenessProbe: httpGet: path: /_status/ - port: 8000 + port: http initialDelaySeconds: 60 periodSeconds: 60 timeoutSeconds: 30 diff --git a/helm/cohort-middleware/Chart.yaml b/helm/cohort-middleware/Chart.yaml index 325d3d0e5..816522b7c 100644 --- a/helm/cohort-middleware/Chart.yaml +++ b/helm/cohort-middleware/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.11 +version: 0.1.12 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/cohort-middleware/README.md b/helm/cohort-middleware/README.md index 5ad5e49cf..3c0bb3105 100644 --- a/helm/cohort-middleware/README.md +++ b/helm/cohort-middleware/README.md @@ -1,6 +1,6 @@ # cohort-middleware -![Version: 0.1.11](https://img.shields.io/badge/Version-0.1.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.12](https://img.shields.io/badge/Version-0.1.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 cohort-middleware @@ -101,6 +101,7 @@ A Helm chart for gen3 cohort-middleware | resources.requests.memory | string | `"128Mi"` | | | securityContext | object | `{}` | | | service.port | int | `80` | | +| service.targetPort | int | `8080` | | | service.type | string | `"ClusterIP"` | | | serviceAccount.annotations | object | `{}` | | | serviceAccount.automount | bool | `true` | | diff --git a/helm/cohort-middleware/templates/deployment.yaml b/helm/cohort-middleware/templates/deployment.yaml index ff220a5fb..017f8d4d3 100644 --- a/helm/cohort-middleware/templates/deployment.yaml +++ b/helm/cohort-middleware/templates/deployment.yaml @@ -54,7 +54,8 @@ spec: mountPath: /config/development.yaml subPath: development.yaml ports: - - containerPort: 8080 + - containerPort: {{ .Values.service.targetPort }} + name: http livenessProbe: {{- toYaml .Values.livenessProbe | nindent 12 }} readinessProbe: diff --git a/helm/cohort-middleware/templates/service.yaml b/helm/cohort-middleware/templates/service.yaml index c40a19239..3a945e8fa 100644 --- a/helm/cohort-middleware/templates/service.yaml +++ b/helm/cohort-middleware/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: 8080 + targetPort: http protocol: TCP name: http selector: diff --git a/helm/cohort-middleware/values.yaml b/helm/cohort-middleware/values.yaml index bb2822976..d80ddfe17 100644 --- a/helm/cohort-middleware/values.yaml +++ b/helm/cohort-middleware/values.yaml @@ -136,6 +136,7 @@ service: type: ClusterIP # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports port: 80 + targetPort: 8080 # This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: diff --git a/helm/common/Chart.yaml b/helm/common/Chart.yaml index 5efa24b30..a220155a4 100644 --- a/helm/common/Chart.yaml +++ b/helm/common/Chart.yaml @@ -15,7 +15,7 @@ type: library # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.23 +version: 0.1.24 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/common/README.md b/helm/common/README.md index c62d1e6f6..1bf51d26a 100644 --- a/helm/common/README.md +++ b/helm/common/README.md @@ -1,6 +1,6 @@ # common -![Version: 0.1.23](https://img.shields.io/badge/Version-0.1.23-informational?style=flat-square) ![Type: library](https://img.shields.io/badge/Type-library-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.24](https://img.shields.io/badge/Version-0.1.24-informational?style=flat-square) ![Type: library](https://img.shields.io/badge/Type-library-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for provisioning databases in gen3 diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index b0b3d8009..d637057ad 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -44,6 +44,12 @@ spec: app: gen3job spec: serviceAccountName: {{ .Chart.Name }}-dbcreate-sa + {{- if $.Values.podSecurityContext }} + securityContext: + {{- range $k, $v := $.Values.podSecurityContext }} + {{ $k }}: {{ $v }} + {{- end }} + {{- end }} restartPolicy: Never containers: - name: db-setup diff --git a/helm/dicom-server/Chart.yaml b/helm/dicom-server/Chart.yaml index 93b076e70..e7b2a1078 100644 --- a/helm/dicom-server/Chart.yaml +++ b/helm/dicom-server/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.19 +version: 0.1.20 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/dicom-server/README.md b/helm/dicom-server/README.md index b9262c8d9..7804ccd8e 100644 --- a/helm/dicom-server/README.md +++ b/helm/dicom-server/README.md @@ -1,6 +1,6 @@ # dicom-server -![Version: 0.1.19](https://img.shields.io/badge/Version-0.1.19-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.20](https://img.shields.io/badge/Version-0.1.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Dicom Server diff --git a/helm/dicom-server/templates/deployment.yaml b/helm/dicom-server/templates/deployment.yaml index fec47c2f0..58b8d248b 100644 --- a/helm/dicom-server/templates/deployment.yaml +++ b/helm/dicom-server/templates/deployment.yaml @@ -48,19 +48,20 @@ spec: readinessProbe: httpGet: path: /system - port: 8042 + port: http initialDelaySeconds: 5 periodSeconds: 20 timeoutSeconds: 30 livenessProbe: httpGet: path: /system - port: 8042 + port: http initialDelaySeconds: 5 periodSeconds: 60 timeoutSeconds: 30 ports: - - containerPort: 8042 + - containerPort: {{ .Values.service.targetPort }} + name: http env: - name: PGHOST valueFrom: diff --git a/helm/etl/Chart.yaml b/helm/etl/Chart.yaml index 4cb17ae39..30a808bce 100644 --- a/helm/etl/Chart.yaml +++ b/helm/etl/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.15 +version: 0.1.16 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/etl/README.md b/helm/etl/README.md index f232c3fd8..5a6bf0628 100644 --- a/helm/etl/README.md +++ b/helm/etl/README.md @@ -1,6 +1,6 @@ # etl -![Version: 0.1.15](https://img.shields.io/badge/Version-0.1.15-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.16](https://img.shields.io/badge/Version-0.1.16-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 etl @@ -8,6 +8,7 @@ A Helm chart for gen3 etl | Key | Type | Default | Description | |-----|------|---------|-------------| +| env | string | `nil` | | | esEndpoint | string | `"gen3-elasticsearch-master"` | | | esGarbageCollect | map | `{"custom_image":null,"enabled":false,"schedule":"0 0 * * *","slack_webhook":"None"}` | Configuration options for es garbage cronjob. | | esGarbageCollect.custom_image | string | `nil` | To set a custom image for the es garbage collect cronjob. Default is the Gen3 Awshelper image. | @@ -100,10 +101,12 @@ A Helm chart for gen3 etl | imagePullSecrets | list | `[]` | Docker image pull secrets. 
| | legacySupport | bool | `false` | | | podAnnotations | map | `{}` | Annotations to add to the pod | +| podSecurityContext | map | `{}` | Security context for the pod | | resources | map | `{"spark":{"requests":{"memory":"128Mi"}},"tube":{"requests":{"memory":"128Mi"}}}` | Resource requests and limits for the containers in the pod | | resources.spark.requests | map | `{"memory":"128Mi"}` | The amount of resources that the container requests | | resources.spark.requests.memory | string | `"128Mi"` | The amount of memory requested | | resources.tube.requests | map | `{"memory":"128Mi"}` | The amount of resources that the container requests | | resources.tube.requests.memory | string | `"128Mi"` | The amount of memory requested | | schedule | string | `"*/30 * * * *"` | | +| securityContext | map | `{}` | Security context for the containers in the pod | | suspendCronjob | bool | `true` | | diff --git a/helm/etl/templates/etl-job.yaml b/helm/etl/templates/etl-job.yaml index 5a4aee8b1..b0622c8fb 100644 --- a/helm/etl/templates/etl-job.yaml +++ b/helm/etl/templates/etl-job.yaml @@ -36,6 +36,8 @@ spec: operator: In values: - ONDEMAND + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 12 }} volumes: {{- if .Values.legacySupport }} - name: config-volume @@ -111,6 +113,12 @@ spec: ports: - containerPort: 80 env: + {{- with .Values.env }} + {{- range $key, $value := . }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} - name: DB_HOST valueFrom: secretKeyRef: diff --git a/helm/etl/values.yaml b/helm/etl/values.yaml index 75ea1d365..d0f552e6b 100644 --- a/helm/etl/values.yaml +++ b/helm/etl/values.yaml @@ -43,6 +43,8 @@ resources: esEndpoint: gen3-elasticsearch-master +env: + etlMapping: mappings: - name: dev_case @@ -152,3 +154,17 @@ suspendCronjob: true legacySupport: false etlForced: "TRUE" + +# -- (map) Security context for the pod +podSecurityContext: {} + +# -- (map) Security context for the containers in the pod +securityContext: + {} + + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 diff --git a/helm/fence/Chart.yaml b/helm/fence/Chart.yaml index 841c660b0..c769729b7 100644 --- a/helm/fence/Chart.yaml +++ b/helm/fence/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.59 +version: 0.1.60 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/fence/README.md b/helm/fence/README.md index 6dea09e62..c50257e47 100644 --- a/helm/fence/README.md +++ b/helm/fence/README.md @@ -1,6 +1,6 @@ # fence -![Version: 0.1.59](https://img.shields.io/badge/Version-0.1.59-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.60](https://img.shields.io/badge/Version-0.1.60-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Fence @@ -171,7 +171,7 @@ A Helm chart for gen3 Fence | nodeSelector | map | `{}` | Node Selector for the pods | | partOf | string | `"Authentication"` | Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. | | podAnnotations | map | `{}` | Annotations to add to the pod | -| podSecurityContext | map | `{"fsGroup":101}` | Security context for the pod | +| podSecurityContext | map | `{}` | Security context for the pod | | postgres | map | `{"database":null,"dbCreate":null,"dbRestore":false,"host":null,"password":null,"port":"5432","separate":false,"username":null}` | Postgres database configuration. If db does not exist in postgres cluster and dbCreate is set ot true then these databases will be created for you | | postgres.database | string | `nil` | Database name for postgres. This is a service override, defaults to - | | postgres.dbCreate | bool | `nil` | Whether the database should be created. Default to global.postgres.dbCreate | @@ -196,7 +196,7 @@ A Helm chart for gen3 Fence | secrets.awsSecretAccessKey | str | `nil` | AWS access key ID. Overrides global key. | | securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{"eks.amazonaws.com/role-arn":null},"create":true,"name":"fence-sa"}` | Service account to use or create. | diff --git a/helm/fence/templates/fence-deployment.yaml b/helm/fence/templates/fence-deployment.yaml index 8ff37c4f6..3eac9adb1 100644 --- a/helm/fence/templates/fence-deployment.yaml +++ b/helm/fence/templates/fence-deployment.yaml @@ -39,6 +39,8 @@ spec: spec: enableServiceLinks: false serviceAccountName: {{ include "fence.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: {{- toYaml .Values.volumes | nindent 8 }} containers: @@ -47,7 +49,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP - name: https containerPort: 443 @@ -87,7 +89,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP - name: https containerPort: 443 diff --git a/helm/fence/templates/presigned-url-fence.yaml b/helm/fence/templates/presigned-url-fence.yaml index 4244e3c4c..c8ba108df 100644 --- a/helm/fence/templates/presigned-url-fence.yaml +++ b/helm/fence/templates/presigned-url-fence.yaml @@ -38,6 +38,8 @@ spec: userhelper: "yes" spec: serviceAccountName: {{ include "fence.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: {{- toYaml .Values.volumes | nindent 8 }} containers: @@ -46,7 +48,7 @@ spec: imagePullPolicy: Always ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP - name: https containerPort: 443 diff --git a/helm/fence/values.yaml b/helm/fence/values.yaml index 5aa31526c..ed6434ab5 100644 --- a/helm/fence/values.yaml +++ b/helm/fence/values.yaml @@ -258,8 +258,8 @@ serviceAccount: podAnnotations: {} # -- (map) Security context for the pod -podSecurityContext: - fsGroup: 101 +podSecurityContext: {} + # fsGroup: 101 # -- (map) Security context for the containers in the pod securityContext: @@ -277,6 +277,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 80 + targetPort: 80 # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/helm/gen3-analysis/Chart.yaml b/helm/gen3-analysis/Chart.yaml index 19eb571df..dd4c4c7f4 100644 --- a/helm/gen3-analysis/Chart.yaml +++ b/helm/gen3-analysis/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.1 +version: 0.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/gen3-analysis/README.md b/helm/gen3-analysis/README.md index 79e2e5ab5..c8c74a0d0 100644 --- a/helm/gen3-analysis/README.md +++ b/helm/gen3-analysis/README.md @@ -1,6 +1,6 @@ # gen3-analysis -![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 gen3-analysis Service @@ -88,7 +88,7 @@ A Helm chart for gen3 gen3-analysis Service | secrets.awsAccessKeyId | str | `nil` | AWS access key ID. Overrides global key. 
| | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":8000}],"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":8000}],"targetPort":8000,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `[{"name":"http","port":80,"protocol":"TCP","targetPort":8000}]` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | strategy | map | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"}` | Rolling update deployment strategy | diff --git a/helm/gen3-analysis/templates/deployment.yaml b/helm/gen3-analysis/templates/deployment.yaml index b4b55bda9..ac1a13cbf 100644 --- a/helm/gen3-analysis/templates/deployment.yaml +++ b/helm/gen3-analysis/templates/deployment.yaml @@ -60,16 +60,17 @@ spec: livenessProbe: httpGet: path: /_status - port: 8000 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 8000 + port: http ports: - - containerPort: 8000 + - containerPort: {{ .Values.service.targetPort }} + name: http {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} diff --git a/helm/gen3-analysis/values.yaml b/helm/gen3-analysis/values.yaml index d49136c94..8053a7c99 100644 --- a/helm/gen3-analysis/values.yaml +++ b/helm/gen3-analysis/values.yaml @@ -189,6 +189,7 @@ resources: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 8000 # -- (int) The port number that the service exposes. port: - protocol: TCP diff --git a/helm/gen3-user-data-library/Chart.yaml b/helm/gen3-user-data-library/Chart.yaml index 18a7e3f83..3c643ea70 100644 --- a/helm/gen3-user-data-library/Chart.yaml +++ b/helm/gen3-user-data-library/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.5 +version: 0.1.6 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/gen3-user-data-library/README.md b/helm/gen3-user-data-library/README.md index 2d2f9f997..f20ae1f89 100644 --- a/helm/gen3-user-data-library/README.md +++ b/helm/gen3-user-data-library/README.md @@ -1,6 +1,6 @@ # gen3-user-data-library -![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: main](https://img.shields.io/badge/AppVersion-main-informational?style=flat-square) +![Version: 0.1.6](https://img.shields.io/badge/Version-0.1.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: main](https://img.shields.io/badge/AppVersion-main-informational?style=flat-square) A Helm chart for Kubernetes @@ -85,6 +85,7 @@ A Helm chart for Kubernetes | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | | service.port | int | `80` | | +| service.targetPort | int | `80` | | | service.type | string | `"ClusterIP"` | | | volumeMounts[0].mountPath | string | `"/gen3userdatalibrary/.env"` | | | volumeMounts[0].name | string | `"gen3-user-data-library-g3auto-volume"` | | diff --git a/helm/gen3-user-data-library/templates/deployment.yaml b/helm/gen3-user-data-library/templates/deployment.yaml index 4d7632060..6ad788201 100644 --- a/helm/gen3-user-data-library/templates/deployment.yaml +++ b/helm/gen3-user-data-library/templates/deployment.yaml @@ -87,19 +87,19 @@ spec: optional: false imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} name: http livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . 
| nindent 10 }} diff --git a/helm/gen3-user-data-library/values.yaml b/helm/gen3-user-data-library/values.yaml index 85327ced0..951533522 100644 --- a/helm/gen3-user-data-library/values.yaml +++ b/helm/gen3-user-data-library/values.yaml @@ -72,6 +72,7 @@ service: type: ClusterIP # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports port: 80 + targetPort: 80 # This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/ ingress: diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index c04244838..c54c7ea5e 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -5,23 +5,23 @@ description: Helm chart to deploy Gen3 Data Commons # Dependencies dependencies: - name: access-backend - version: 0.1.9 + version: 0.1.10 repository: "file://../access-backend" condition: access-backend.enabled - name: ambassador - version: 0.1.25 + version: 0.1.26 repository: "file://../ambassador" condition: ambassador.enabled - name: arborist - version: 0.1.24 + version: 0.1.25 repository: "file://../arborist" condition: arborist.enabled - name: argo-wrapper - version: 0.1.19 + version: 0.1.20 repository: "file://../argo-wrapper" condition: argo-wrapper.enabled - name: audit - version: 0.1.30 + version: 0.1.31 repository: "file://../audit" condition: audit.enabled - name: aws-es-proxy @@ -29,15 +29,15 @@ dependencies: repository: "file://../aws-es-proxy" condition: aws-es-proxy.enabled - name: cedar - version: 0.1.12 + version: 0.1.13 repository: "file://../cedar" condition: cedar.enabled - name: cohort-middleware - version: 0.1.11 + version: 0.1.12 repository: "file://../cohort-middleware" condition: cohort-middleware.enabled - name: common - version: 0.1.23 + version: 0.1.24 repository: file://../common - name: dashboard version: 0.1.8 @@ -48,7 +48,7 @@ dependencies: repository: "file://../datareplicate" condition: datareplicate.enabled - name: etl - version: 0.1.15 + version: 0.1.16 repository: file://../etl condition: etl.enabled - name: frontend-framework @@ -56,63 +56,63 @@ dependencies: repository: "file://../frontend-framework" condition: frontend-framework.enabled - name: fence - version: 0.1.59 + version: 0.1.60 repository: "file://../fence" condition: fence.enabled - name: gen3-user-data-library - version: 0.1.5 + version: 0.1.6 repository: "file://../gen3-user-data-library" condition: gen3-user-data-library.enabled - name: guppy - version: 0.1.25 + version: 0.1.26 repository: "file://../guppy" condition: guppy.enabled - name: hatchery - version: 0.1.52 + version: 0.1.53 repository: "file://../hatchery" condition: hatchery.enabled - name: indexd - version: 0.1.33 + version: 0.1.34 repository: "file://../indexd" condition: indexd.enabled - name: manifestservice - version: 0.1.32 + version: 0.1.33 repository: "file://../manifestservice" condition: manifestservice.enabled - name: metadata - version: 0.1.30 + version: 0.1.31 repository: "file://../metadata" condition: metadata.enabled - name: peregrine - version: 0.1.31 + version: 0.1.32 repository: "file://../peregrine" condition: peregrine.enabled - name: portal - version: 0.1.45 + version: 0.1.46 repository: "file://../portal" condition: portal.enabled - name: requestor - version: 0.1.24 + version: 0.1.25 repository: "file://../requestor" condition: requestor.enabled - name: revproxy - version: 0.1.43 + version: 0.1.44 repository: "file://../revproxy" condition: 
revproxy.enabled - name: sheepdog - version: 0.1.29 + version: 0.1.30 repository: "file://../sheepdog" condition: sheepdog.enabled - name: ssjdispatcher - version: 0.1.31 + version: 0.1.32 repository: "file://../ssjdispatcher" condition: ssjdispatcher.enabled - name: sower - version: 0.1.34 + version: 0.1.35 condition: sower.enabled repository: "file://../sower" - name: wts - version: 0.1.30 + version: 0.1.31 repository: "file://../wts" condition: wts.enabled - name: gen3-network-policies @@ -120,19 +120,19 @@ dependencies: repository: "file://../gen3-network-policies" condition: global.netPolicy.enabled - name: dicom-server - version: 0.1.19 + version: 0.1.20 repository: file://../dicom-server condition: dicom-server.enabled - name: ohif-viewer - version: 0.1.3 + version: 0.1.4 repository: file://../ohif-viewer condition: ohif-viewer.enabled - name: orthanc - version: 0.1.4 + version: 0.1.5 repository: file://../orthanc condition: orthanc.enabled - name: gen3-analysis - version: 0.1.1 + version: 0.1.2 repository: file://../gen3-analysis condition: gen3-analysis.enabled @@ -169,7 +169,7 @@ type: application # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.59 +version: 0.2.60 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/gen3/README.md b/helm/gen3/README.md index 68b79f497..586a66b62 100644 --- a/helm/gen3/README.md +++ b/helm/gen3/README.md @@ -1,6 +1,6 @@ # gen3 -![Version: 0.2.59](https://img.shields.io/badge/Version-0.2.59-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.2.60](https://img.shields.io/badge/Version-0.2.60-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) Helm chart to deploy Gen3 Data Commons @@ -18,40 +18,40 @@ Helm chart to deploy Gen3 Data Commons | Repository | Name | Version | |------------|------|---------| -| file://../access-backend | access-backend | 0.1.9 | -| file://../ambassador | ambassador | 0.1.25 | -| file://../arborist | arborist | 0.1.24 | -| file://../argo-wrapper | argo-wrapper | 0.1.19 | -| file://../audit | audit | 0.1.30 | +| file://../access-backend | access-backend | 0.1.10 | +| file://../ambassador | ambassador | 0.1.26 | +| file://../arborist | arborist | 0.1.25 | +| file://../argo-wrapper | argo-wrapper | 0.1.20 | +| file://../audit | audit | 0.1.31 | | file://../aws-es-proxy | aws-es-proxy | 0.1.30 | -| file://../cedar | cedar | 0.1.12 | -| file://../cohort-middleware | cohort-middleware | 0.1.11 | -| file://../common | common | 0.1.23 | +| file://../cedar | cedar | 0.1.13 | +| file://../cohort-middleware | cohort-middleware | 0.1.12 | +| file://../common | common | 0.1.24 | | file://../dashboard | dashboard | 0.1.8 | | file://../datareplicate | datareplicate | 0.0.29 | -| file://../dicom-server | dicom-server | 0.1.19 | -| file://../etl | etl | 0.1.15 | -| file://../fence | fence | 0.1.59 | +| file://../dicom-server | dicom-server | 0.1.20 | +| file://../etl | etl | 0.1.16 | +| file://../fence | fence | 0.1.60 | 
| file://../frontend-framework | frontend-framework | 0.1.13 | -| file://../gen3-analysis | gen3-analysis | 0.1.1 | +| file://../gen3-analysis | gen3-analysis | 0.1.2 | | file://../gen3-network-policies | gen3-network-policies | 0.1.2 | -| file://../gen3-user-data-library | gen3-user-data-library | 0.1.5 | -| file://../guppy | guppy | 0.1.25 | -| file://../hatchery | hatchery | 0.1.52 | -| file://../indexd | indexd | 0.1.33 | -| file://../manifestservice | manifestservice | 0.1.32 | -| file://../metadata | metadata | 0.1.30 | +| file://../gen3-user-data-library | gen3-user-data-library | 0.1.6 | +| file://../guppy | guppy | 0.1.26 | +| file://../hatchery | hatchery | 0.1.53 | +| file://../indexd | indexd | 0.1.34 | +| file://../manifestservice | manifestservice | 0.1.33 | +| file://../metadata | metadata | 0.1.31 | | file://../neuvector | neuvector | 0.1.2 | -| file://../ohif-viewer | ohif-viewer | 0.1.3 | -| file://../orthanc | orthanc | 0.1.4 | -| file://../peregrine | peregrine | 0.1.31 | -| file://../portal | portal | 0.1.45 | -| file://../requestor | requestor | 0.1.24 | -| file://../revproxy | revproxy | 0.1.43 | -| file://../sheepdog | sheepdog | 0.1.29 | -| file://../sower | sower | 0.1.34 | -| file://../ssjdispatcher | ssjdispatcher | 0.1.31 | -| file://../wts | wts | 0.1.30 | +| file://../ohif-viewer | ohif-viewer | 0.1.4 | +| file://../orthanc | orthanc | 0.1.5 | +| file://../peregrine | peregrine | 0.1.32 | +| file://../portal | portal | 0.1.46 | +| file://../requestor | requestor | 0.1.25 | +| file://../revproxy | revproxy | 0.1.44 | +| file://../sheepdog | sheepdog | 0.1.30 | +| file://../sower | sower | 0.1.35 | +| file://../ssjdispatcher | ssjdispatcher | 0.1.32 | +| file://../wts | wts | 0.1.31 | | https://charts.bitnami.com/bitnami | postgresql | 11.9.13 | | https://helm.elastic.co | elasticsearch | 7.10.2 | diff --git a/helm/gen3/templates/nginx-config.yaml b/helm/gen3/templates/nginx-config.yaml new file mode 100644 index 000000000..4698aaa28 --- /dev/null +++ b/helm/gen3/templates/nginx-config.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-config +data: + nginx.conf: | + user gen3; + worker_processes auto; + error_log /var/log/nginx/error.log notice; + pid /var/lib/nginx/nginx.pid; + + # Load dynamic modules. See /usr/share/doc/nginx/README.dynamic. + include /usr/share/nginx/modules/*.conf; + + events { + worker_connections 1024; + } + + http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + + # Suppress logging for known health checks + map $http_user_agent $loggable { + default 1; + "ELB-HealthChecker/2.0" 0; + ~^Uptime-Kuma 0; + ~^kube-probe 0; + ~GoogleStackdriverMonitoring 0; + } + + access_log /var/log/nginx/access.log main if=$loggable; + + sendfile on; + tcp_nopush on; + keepalive_timeout 65; + types_hash_max_size 4096; + + # increase max from default 1m + client_max_body_size 200m; + + + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # Load modular configuration files from the /etc/nginx/conf.d directory. + # See http://nginx.org/en/docs/ngx_core_module.html#include + # for more information. 
+ include /etc/nginx/conf.d/*.conf; + + server { + + listen 8080; + server_name localhost; + proxy_read_timeout 400; + proxy_send_timeout 400; + proxy_connect_timeout 400; + + location / { + proxy_pass http://127.0.0.1:8000; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } + } \ No newline at end of file diff --git a/helm/gen3/templates/tests/service-account.yaml b/helm/gen3/templates/tests/service-account.yaml deleted file mode 100644 index 95b67cfdf..000000000 --- a/helm/gen3/templates/tests/service-account.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kubectl-access - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: kubectl-access-role - namespace: {{ .Release.Namespace }} -rules: - - apiGroups: [""] - resources: ["pods", "pods/exec", "configmaps", "deployments"] - verbs: ["get", "list", "create"] - - apiGroups: ["batch"] - resources: ["cronjobs", "jobs"] - verbs: ["get", "list", "create", "delete", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kubectl-access-binding - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubectl-access-role -subjects: - - kind: ServiceAccount - name: kubectl-access - namespace: {{ .Release.Namespace }} diff --git a/helm/guppy/Chart.yaml b/helm/guppy/Chart.yaml index 00a4346c2..3dd680dc0 100644 --- a/helm/guppy/Chart.yaml +++ b/helm/guppy/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.25 +version: 0.1.26 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/guppy/README.md b/helm/guppy/README.md index cde485207..76620b50d 100644 --- a/helm/guppy/README.md +++ b/helm/guppy/README.md @@ -1,6 +1,6 @@ # guppy -![Version: 0.1.25](https://img.shields.io/badge/Version-0.1.25-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.26](https://img.shields.io/badge/Version-0.1.26-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Guppy Service @@ -73,6 +73,7 @@ A Helm chart for gen3 Guppy Service | indices | list | `[{"index":"dev_case","type":"case"},{"index":"dev_file","type":"file"}]` | Elasticsearch index configurations | | metricsEnabled | bool | `nil` | Whether Metrics are enabled. | | partOf | string | `"Explorer-Tab"` | Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. | +| podSecurityContext | map | `{}` | Security context for the pod | | release | string | `"production"` | Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". | | replicaCount | int | `1` | Number of replicas for the deployment. 
| | resources | map | `{"limits":{"memory":"2Gi"},"requests":{"memory":"500Mi"}}` | Resource requests and limits for the containers in the pod | @@ -84,8 +85,9 @@ A Helm chart for gen3 Guppy Service | secrets | map | `{"awsAccessKeyId":null,"awsSecretAccessKey":null}` | Secret information to access the db restore job S3 bucket. | | secrets.awsAccessKeyId | str | `nil` | AWS access key ID. Overrides global key. | | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | +| securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":8000}],"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":8000}],"targetPort":8000,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `[{"name":"http","port":80,"protocol":"TCP","targetPort":8000}]` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | strategy | map | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"}` | Rolling update deployment strategy | diff --git a/helm/guppy/templates/deployment.yaml b/helm/guppy/templates/deployment.yaml index a80ae0fb4..f6ba7ef18 100644 --- a/helm/guppy/templates/deployment.yaml +++ b/helm/guppy/templates/deployment.yaml @@ -43,6 +43,8 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.volumes}} volumes: {{- toYaml . | nindent 8}} @@ -50,10 +52,12 @@ spec: containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} livenessProbe: httpGet: path: /_status - port: 8000 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 @@ -61,9 +65,10 @@ spec: readinessProbe: httpGet: path: /_status - port: 8000 + port: http ports: - - containerPort: 8000 + - containerPort: {{ .Values.service.targetPort }} + name: http env: - name: GUPPY_PORT value: "8000" diff --git a/helm/guppy/values.yaml b/helm/guppy/values.yaml index 505e38218..746c2002b 100644 --- a/helm/guppy/values.yaml +++ b/helm/guppy/values.yaml @@ -174,6 +174,7 @@ resources: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 8000 # -- (int) The port number that the service exposes. 
port: - protocol: TCP @@ -211,3 +212,18 @@ partOf: "Explorer-Tab" selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl commonLabels: + + +# -- (map) Security context for the pod +podSecurityContext: {} + +# -- (map) Security context for the containers in the pod +securityContext: + {} + + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 diff --git a/helm/hatchery/Chart.yaml b/helm/hatchery/Chart.yaml index 71b6f38de..ef63577b7 100644 --- a/helm/hatchery/Chart.yaml +++ b/helm/hatchery/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.52 +version: 0.1.53 # This is the version number of the application being deployed. This version number should be diff --git a/helm/hatchery/README.md b/helm/hatchery/README.md index 97695daac..6a156902b 100644 --- a/helm/hatchery/README.md +++ b/helm/hatchery/README.md @@ -1,6 +1,6 @@ # hatchery -![Version: 0.1.52](https://img.shields.io/badge/Version-0.1.52-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.53](https://img.shields.io/badge/Version-0.1.53-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Hatchery @@ -107,7 +107,7 @@ A Helm chart for gen3 Hatchery | resources.requests | map | `{"memory":"12Mi"}` | The amount of resources that the container requests | | resources.requests.memory | string | `"12Mi"` | The amount of memory requested | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":8000,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":"hatchery-sa"}` | Service account to use or create. | diff --git a/helm/hatchery/templates/deployment.yaml b/helm/hatchery/templates/deployment.yaml index 85f67c2af..100e9b293 100644 --- a/helm/hatchery/templates/deployment.yaml +++ b/helm/hatchery/templates/deployment.yaml @@ -53,7 +53,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http - containerPort: 8000 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: diff --git a/helm/hatchery/values.yaml b/helm/hatchery/values.yaml index 6f08d6ab4..d62d049e5 100644 --- a/helm/hatchery/values.yaml +++ b/helm/hatchery/values.yaml @@ -138,6 +138,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. 
port: 80 + targetPort: 8000 # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/helm/indexd/Chart.yaml b/helm/indexd/Chart.yaml index aee6db3ca..46c78c539 100644 --- a/helm/indexd/Chart.yaml +++ b/helm/indexd/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.33 +version: 0.1.34 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/indexd/README.md b/helm/indexd/README.md index 7b023109b..ea318a9e3 100644 --- a/helm/indexd/README.md +++ b/helm/indexd/README.md @@ -1,6 +1,6 @@ # indexd -![Version: 0.1.33](https://img.shields.io/badge/Version-0.1.33-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.34](https://img.shields.io/badge/Version-0.1.34-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 indexd @@ -97,7 +97,7 @@ A Helm chart for gen3 indexd | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID to access the db restore job S3 bucket. Overrides global key. | | securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":false,"name":""}` | Service account to use or create. | diff --git a/helm/indexd/templates/deployment.yaml b/helm/indexd/templates/deployment.yaml index 968ce9b55..7789daa5e 100644 --- a/helm/indexd/templates/deployment.yaml +++ b/helm/indexd/templates/deployment.yaml @@ -92,7 +92,7 @@ spec: {{- end }} ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: diff --git a/helm/indexd/templates/pre-install.yaml b/helm/indexd/templates/pre-install.yaml index 8e18baf1c..bbbb7dae6 100644 --- a/helm/indexd/templates/pre-install.yaml +++ b/helm/indexd/templates/pre-install.yaml @@ -15,6 +15,8 @@ spec: app: gen3job spec: automountServiceAccountToken: false + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.volumes }} volumes: {{- toYaml . | nindent 8 }} diff --git a/helm/indexd/values.yaml b/helm/indexd/values.yaml index fab646876..79766cce5 100644 --- a/helm/indexd/values.yaml +++ b/helm/indexd/values.yaml @@ -213,6 +213,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. 
port: 80 + targetPort: 80 # -- (map) Resource requests and limits for the containers in the pod resources: diff --git a/helm/manifestservice/Chart.yaml b/helm/manifestservice/Chart.yaml index 901139629..392601b03 100644 --- a/helm/manifestservice/Chart.yaml +++ b/helm/manifestservice/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.32 +version: 0.1.33 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/manifestservice/README.md b/helm/manifestservice/README.md index aa03515f9..35b3cc665 100644 --- a/helm/manifestservice/README.md +++ b/helm/manifestservice/README.md @@ -1,6 +1,6 @@ # manifestservice -![Version: 0.1.32](https://img.shields.io/badge/Version-0.1.32-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.33](https://img.shields.io/badge/Version-0.1.33-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for Kubernetes @@ -67,6 +67,7 @@ A Helm chart for Kubernetes | manifestserviceG3auto.prefix | string | `"test"` | Directory name to use within the s3 bucket. | | metricsEnabled | bool | `nil` | Whether Metrics are enabled. | | partOf | string | `"Workspace-tab"` | Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. | +| podSecurityContext | map | `{}` | Security context for the pod | | release | string | `"production"` | Valid options are "production" or "dev". If invalid option is set- the value will default to "dev". | | replicaCount | int | `1` | Number of replicas for the deployment. | | resources | map | `{"limits":{"memory":"512Mi"},"requests":{"memory":"12Mi"}}` | Resource requests and limits for the containers in the pod | @@ -78,8 +79,9 @@ A Helm chart for Kubernetes | secrets | map | `{"awsAccessKeyId":null,"awsSecretAccessKey":null}` | Secret information for External Secrets. | | secrets.awsAccessKeyId | str | `nil` | AWS access key ID. Overrides global key. | | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | +| securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":""}` | Service account to use or create. 
| diff --git a/helm/manifestservice/templates/deployment.yaml b/helm/manifestservice/templates/deployment.yaml index 9c0b0faf3..129d02b80 100644 --- a/helm/manifestservice/templates/deployment.yaml +++ b/helm/manifestservice/templates/deployment.yaml @@ -37,6 +37,8 @@ spec: {{- end }} spec: serviceAccountName: {{ include "manifestservice.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} @@ -87,15 +89,16 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 10 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http diff --git a/helm/manifestservice/templates/service.yaml b/helm/manifestservice/templates/service.yaml index 173ba48c2..41e3b0e28 100644 --- a/helm/manifestservice/templates/service.yaml +++ b/helm/manifestservice/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: 80 + targetPort: http protocol: TCP name: http selector: diff --git a/helm/manifestservice/values.yaml b/helm/manifestservice/values.yaml index 981fe70d6..f24f49be3 100644 --- a/helm/manifestservice/values.yaml +++ b/helm/manifestservice/values.yaml @@ -99,6 +99,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 80 + targetPort: 80 # -- (map) Service account to use or create. serviceAccount: @@ -200,3 +201,19 @@ partOf: "Workspace-tab" selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl commonLabels: + + +# -- (map) Security context for the pod +podSecurityContext: + {} + # fsGroup: 2000 + +# -- (map) Security context for the containers in the pod +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 \ No newline at end of file diff --git a/helm/metadata/Chart.yaml b/helm/metadata/Chart.yaml index 55dd19282..41b4be7de 100644 --- a/helm/metadata/Chart.yaml +++ b/helm/metadata/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.30 +version: 0.1.31 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/metadata/README.md b/helm/metadata/README.md index c631cddbc..7da7f2737 100644 --- a/helm/metadata/README.md +++ b/helm/metadata/README.md @@ -1,6 +1,6 @@ # metadata -![Version: 0.1.30](https://img.shields.io/badge/Version-0.1.30-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.31](https://img.shields.io/badge/Version-0.1.31-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Metadata Service @@ -79,10 +79,10 @@ A Helm chart for gen3 Metadata Service | global.publicDataSets | bool | `true` | Whether public datasets are enabled. | | global.revproxyArn | string | `"arn:aws:acm:us-east-1:123456:certificate"` | ARN of the reverse proxy certificate. | | global.tierAccessLevel | string | `"libre"` | Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` | -| image | map | `{"pullPolicy":"Always","repository":"quay.io/cdis/metadata-service","tag":"feat_es-7"}` | Docker image information. | +| image | map | `{"pullPolicy":"Always","repository":"quay.io/cdis/metadata-service","tag":"master"}` | Docker image information. | | image.pullPolicy | string | `"Always"` | Docker pull policy. | | image.repository | string | `"quay.io/cdis/metadata-service"` | Docker repository. | -| image.tag | string | `"feat_es-7"` | Overrides the image tag whose default is the chart appVersion. | +| image.tag | string | `"master"` | Overrides the image tag whose default is the chart appVersion. | | initContainerName | string | `"metadata-db-migrate"` | Name of the init container. | | initResources | map | `{"requests":{"memory":"100Mi"}}` | Resource limits for the init container. | | initResources.requests | map | `{"memory":"100Mi"}` | The maximum amount of resources that the container is allowed to use | @@ -112,7 +112,7 @@ A Helm chart for gen3 Metadata Service | secrets.awsAccessKeyId | str | `nil` | AWS access key ID. Overrides global key. | | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":80}],"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":80}],"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `[{"name":"http","port":80,"protocol":"TCP","targetPort":80}]` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". 
| | serviceAnnotations."getambassador.io/config" | string | `"---\napiVersion: ambassador/v1\nambassador_id: \"gen3\"\nkind: Mapping\nname: metadata_mapping\nprefix: /index/\nservice: http://metadata-service:80\n"` | | diff --git a/helm/metadata/templates/deployment.yaml b/helm/metadata/templates/deployment.yaml index d0706daae..8bc95ee1f 100644 --- a/helm/metadata/templates/deployment.yaml +++ b/helm/metadata/templates/deployment.yaml @@ -43,6 +43,8 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: - name: config-volume-g3auto secret: @@ -103,16 +105,17 @@ spec: livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} diff --git a/helm/metadata/templates/secrets.yaml b/helm/metadata/templates/secrets.yaml index 0bd639d73..e69de29bb 100644 --- a/helm/metadata/templates/secrets.yaml +++ b/helm/metadata/templates/secrets.yaml @@ -1,16 +0,0 @@ -{{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sMetadataSecret) }} -apiVersion: v1 -kind: Secret -metadata: - name: metadata-g3auto -stringData: - {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} - base64Authz.txt: {{ $randomPass | quote | b64enc }} - metadata.env: | - DEBUG={{ .Values.debug}} - DB_HOST={{ .Values.postgres.host }} - DB_USER={{ .Values.postgres.user }} - DB_PASSWORD={{ include "metadata.postgres.password" . }} - DB_DATABASE={{ .Values.postgres.dbname }} - ADMIN_LOGINS={{ $randomPass }} -{{- end }} \ No newline at end of file diff --git a/helm/metadata/values.yaml b/helm/metadata/values.yaml index 3db3bae4e..b00cbb33a 100644 --- a/helm/metadata/values.yaml +++ b/helm/metadata/values.yaml @@ -169,7 +169,7 @@ image: # -- (string) Docker pull policy. pullPolicy: Always # -- (string) Overrides the image tag whose default is the chart appVersion. - tag: "feat_es-7" + tag: "master" debug: false @@ -323,6 +323,7 @@ serviceAnnotations: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 80 # -- (int) The port number that the service exposes. port: - protocol: TCP diff --git a/helm/ohif-viewer/Chart.yaml b/helm/ohif-viewer/Chart.yaml index 6ea132776..d3f04be86 100644 --- a/helm/ohif-viewer/Chart.yaml +++ b/helm/ohif-viewer/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.3 +version: 0.1.4 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/ohif-viewer/README.md b/helm/ohif-viewer/README.md index 85e308582..610e68f0a 100644 --- a/helm/ohif-viewer/README.md +++ b/helm/ohif-viewer/README.md @@ -1,6 +1,6 @@ # ohif-viewer -![Version: 0.1.3](https://img.shields.io/badge/Version-0.1.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Ohif Viewer diff --git a/helm/ohif-viewer/templates/deployment.yaml b/helm/ohif-viewer/templates/deployment.yaml index 5b1452fde..73b119977 100644 --- a/helm/ohif-viewer/templates/deployment.yaml +++ b/helm/ohif-viewer/templates/deployment.yaml @@ -47,19 +47,20 @@ spec: readinessProbe: httpGet: path: / - port: 8080 + port: http initialDelaySeconds: 5 periodSeconds: 20 timeoutSeconds: 30 livenessProbe: httpGet: path: / - port: 8080 + port: http initialDelaySeconds: 5 periodSeconds: 60 timeoutSeconds: 30 ports: - - containerPort: 8080 + - containerPort: {{ .Values.service.targetPort }} + name: http env: - name: PORT value: "8080" diff --git a/helm/orthanc/Chart.yaml b/helm/orthanc/Chart.yaml index ec4f4ac2b..46be7e3d2 100644 --- a/helm/orthanc/Chart.yaml +++ b/helm/orthanc/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.4 +version: 0.1.5 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/orthanc/README.md b/helm/orthanc/README.md index c850f3afb..cf0472b5c 100644 --- a/helm/orthanc/README.md +++ b/helm/orthanc/README.md @@ -1,6 +1,6 @@ # orthanc -![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Dicom Server diff --git a/helm/orthanc/templates/deployment.yaml b/helm/orthanc/templates/deployment.yaml index 492706a2d..f70575a97 100644 --- a/helm/orthanc/templates/deployment.yaml +++ b/helm/orthanc/templates/deployment.yaml @@ -67,7 +67,8 @@ spec: periodSeconds: 60 timeoutSeconds: 30 ports: - - containerPort: 8042 + - containerPort: {{ .Values.service.targetPort }} + name: http env: - name: PGHOST valueFrom: diff --git a/helm/peregrine/Chart.yaml b/helm/peregrine/Chart.yaml index 26d6a504c..a6a8a8797 100644 --- a/helm/peregrine/Chart.yaml +++ b/helm/peregrine/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.31 +version: 0.1.32 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/peregrine/README.md b/helm/peregrine/README.md index 742375e3b..64d8d8319 100644 --- a/helm/peregrine/README.md +++ b/helm/peregrine/README.md @@ -1,6 +1,6 @@ # peregrine -![Version: 0.1.31](https://img.shields.io/badge/Version-0.1.31-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.32](https://img.shields.io/badge/Version-0.1.32-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Peregrine service @@ -92,7 +92,7 @@ A Helm chart for gen3 Peregrine service | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". 
| | serviceAccount | map | `{"annotations":{},"create":true,"name":""}` | Service account to use or create. | diff --git a/helm/peregrine/templates/deployment.yaml b/helm/peregrine/templates/deployment.yaml index 512a93eb9..dfae900d5 100644 --- a/helm/peregrine/templates/deployment.yaml +++ b/helm/peregrine/templates/deployment.yaml @@ -159,7 +159,7 @@ spec: {{- end }} ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: diff --git a/helm/peregrine/values.yaml b/helm/peregrine/values.yaml index bd1a155dd..8aa604c57 100644 --- a/helm/peregrine/values.yaml +++ b/helm/peregrine/values.yaml @@ -173,6 +173,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 80 + targetPort: 80 # -- (map) Configuration for network policies created by this chart. Only relevant if "global.netPolicy.enabled" is set to true netPolicy: diff --git a/helm/portal/Chart.yaml b/helm/portal/Chart.yaml index a19d55762..5d3b54f2a 100644 --- a/helm/portal/Chart.yaml +++ b/helm/portal/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.45 +version: 0.1.46 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/portal/README.md b/helm/portal/README.md index e41465d81..a435dcf10 100644 --- a/helm/portal/README.md +++ b/helm/portal/README.md @@ -1,6 +1,6 @@ # portal -![Version: 0.1.45](https://img.shields.io/badge/Version-0.1.45-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.46](https://img.shields.io/badge/Version-0.1.46-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 data-portal @@ -96,7 +96,7 @@ A Helm chart for gen3 data-portal | revisionHistoryLimit | int | `2` | Number of old revisions to retain | | securityContext | map | `{}` | Security context to apply to the container | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":""}` | Service account to use or create. | diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index 9b26eed7e..a21add5f4 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -39,6 +39,8 @@ spec: {{- include "common.extraLabels" . 
| nindent 8 }} spec: serviceAccountName: {{ include "portal.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -64,6 +66,9 @@ spec: name: "privacy-policy" optional: true {{- end }} + - name: nginx-config + configMap: + name: portal-nginx - name: extra-images-config configMap: name: portal-extra-images @@ -130,6 +135,8 @@ spec: containers: - name: portal image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} imagePullPolicy: {{ .Values.image.pullPolicy }} # livenessProbe: # httpGet: @@ -146,14 +153,15 @@ spec: {{- else }} path: / {{- end }} - port: 80 + port: http initialDelaySeconds: 5 periodSeconds: 10 timeoutSeconds: 30 resources: {{- toYaml .Values.resources | nindent 12 }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http - containerPort: 443 # command: # - /bin/bash @@ -260,6 +268,12 @@ spec: - name: "config-volume" mountPath: "/data-portal/data/config/gitops.json" subPath: "gitops.json" + - name: "nginx-config" + mountPath: "/etc/nginx/conf.d/nginx.conf" + subPath: "nginx.conf" + - name: "nginx-config" + mountPath: "/etc/nginx/nginx.conf" + subPath: "main" - name: "config-volume" mountPath: "/data-portal/custom/logo/gitops-logo.png" subPath: "gitops-logo.png" diff --git a/helm/portal/templates/job.yaml b/helm/portal/templates/job.yaml index bd78d8005..2af65deb9 100644 --- a/helm/portal/templates/job.yaml +++ b/helm/portal/templates/job.yaml @@ -102,14 +102,15 @@ spec: {{- else }} path: / {{- end }} - port: 80 + port: http initialDelaySeconds: 5 periodSeconds: 10 timeoutSeconds: 30 resources: {{- toYaml .Values.resources | nindent 12 }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http - containerPort: 443 command: - /bin/bash diff --git a/helm/portal/templates/nginx-conf.yaml b/helm/portal/templates/nginx-conf.yaml new file mode 100644 index 000000000..143272c5b --- /dev/null +++ b/helm/portal/templates/nginx-conf.yaml @@ -0,0 +1,180 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: portal-nginx +data: + main: | + # For more information on configuration, see: + # * Official English Documentation: http://nginx.org/en/docs/ + # * Official Russian Documentation: http://nginx.org/ru/docs/ + + user nginx; + worker_processes auto; + error_log /var/log/nginx/error.log notice; + pid /run/nginx.pid; + + # Load dynamic modules. See /usr/share/doc/nginx/README.dynamic. + include /usr/share/nginx/modules/*.conf; + + events { + worker_connections 1024; + } + + http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + tcp_nopush on; + keepalive_timeout 65; + types_hash_max_size 4096; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # Load modular configuration files from the /etc/nginx/conf.d directory. + # See http://nginx.org/en/docs/ngx_core_module.html#include + # for more information. + include /etc/nginx/conf.d/*.conf; + + server { + listen 8000; + listen [::]:8000; + server_name _; + root /usr/share/nginx/html; + + # Load configuration files for the default server block. 
+ include /etc/nginx/default.d/*.conf; + + error_page 404 /404.html; + location = /404.html { + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + } + } + + # Settings for a TLS enabled server. + # + # server { + # listen 443 ssl; + # listen [::]:443 ssl; + # http2 on; + # server_name _; + # root /usr/share/nginx/html; + # + # ssl_certificate "/etc/pki/nginx/server.crt"; + # ssl_certificate_key "/etc/pki/nginx/private/server.key"; + # ssl_session_cache shared:SSL:1m; + # ssl_session_timeout 10m; + # ssl_ciphers PROFILE=SYSTEM; + # ssl_prefer_server_ciphers on; + # + # # Load configuration files for the default server block. + # include /etc/nginx/default.d/*.conf; + # + # error_page 404 /404.html; + # location = /404.html { + # } + # + # error_page 500 502 503 504 /50x.html; + # location = /50x.html { + # } + # } + } + nginx.conf: | + ## + # Note that this file actually winds up at + # /etc/nginx/conf.d/nginx.conf + # , and is loaded by /etc/nginx/nginx.conf in an http{} block + ## + + ## + # Logging Settings + # The http_x_* headers are set by the gen3 reverse proxy: + # kube/services/revproxy/ + ## + log_format json '{"gen3log": "nginx", ' + '"date_access": "$time_iso8601", ' + '"user_id": "$http_x_userid", ' + '"request_id": "$http_x_reqid", ' + '"session_id": "$http_x_sessionid", ' + '"visitor_id": "$http_x_visitorid", ' + '"network_client_ip": "$http_x_forwarded_for", ' + '"network_bytes_write": $body_bytes_sent, ' + '"http_response_time": "$request_time", ' + '"http_status_code": $status, ' + '"http_request": "$request_uri", ' + '"http_verb": "$request_method", ' + '"http_referer": "$http_referer", ' + '"http_useragent": "$http_user_agent", ' + '"message": "$request"}'; + + log_format aws '$http_x_forwarded_for - $http_x_userid [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; + + access_log /dev/stdout json; + + server { + listen 8080 default_server; + ssl_certificate /mnt/ssl/nginx.crt; + ssl_certificate_key /mnt/ssl/nginx.key; + server_tokens off; + + root /data-portal; + index index.html index.htm; + + # dev.html signals dev mode - for developer testing + rewrite ^(\/\w+)?\/dev.html.+$ $1/dev.html; + + # Block all access to things like .git or .htaccess + location ~ /\. { + deny all; + } + + # Block all access to package and config files + # Note if WAF is deployed this should already be handled by WAF + location ~ package.json$ { + deny all; + } + location ~ package-lock.json$ { + deny all; + } + location ^~ /npm-debug.log { + deny all; + } + location ^~ /tsconfig.json { + deny all; + } + location ^~ /webpack.config.js { + deny all; + } + location ^~ /yarn.lock { + deny all; + } + location ^~ /nginx.conf { + deny all; + } + + location ~* \.(?:manifest|appcache|html?|xml|json)$ { + expires -1; + # access_log logs/static.log; # I don't usually include a static log + } + + location ~* \.(?:css|js)$ { + try_files $uri =404; + expires 1y; + access_log off; + add_header Cache-Control "public"; + } + + # Any route that doesn't have a file extension (e.g. 
/devices) + location / { + try_files $uri /index.html; + } + } \ No newline at end of file diff --git a/helm/portal/templates/service.yaml b/helm/portal/templates/service.yaml index 971503f49..182b26638 100644 --- a/helm/portal/templates/service.yaml +++ b/helm/portal/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: 80 + targetPort: http protocol: TCP name: http selector: diff --git a/helm/portal/values.yaml b/helm/portal/values.yaml index 575ae2080..796870808 100644 --- a/helm/portal/values.yaml +++ b/helm/portal/values.yaml @@ -144,6 +144,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 80 + targetPort: 80 # -- (map) Node selector to apply to the pod nodeSelector: {} diff --git a/helm/requestor/Chart.yaml b/helm/requestor/Chart.yaml index 2bd172287..83346cf21 100644 --- a/helm/requestor/Chart.yaml +++ b/helm/requestor/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.24 +version: 0.1.25 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/requestor/README.md b/helm/requestor/README.md index ea206d03a..e20b4c33b 100644 --- a/helm/requestor/README.md +++ b/helm/requestor/README.md @@ -1,6 +1,6 @@ # requestor -![Version: 0.1.24](https://img.shields.io/badge/Version-0.1.24-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.25](https://img.shields.io/badge/Version-0.1.25-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Requestor Service @@ -104,7 +104,7 @@ A Helm chart for gen3 Requestor Service | secrets.awsAccessKeyId | str | `nil` | AWS access key ID. Overrides global key. | | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":80}],"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":[{"name":"http","port":80,"protocol":"TCP","targetPort":80}],"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `[{"name":"http","port":80,"protocol":"TCP","targetPort":80}]` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". 
| | strategy | map | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"}` | Rolling update deployment strategy | diff --git a/helm/requestor/templates/deployment.yaml b/helm/requestor/templates/deployment.yaml index 18e9d0b02..8f079cda8 100644 --- a/helm/requestor/templates/deployment.yaml +++ b/helm/requestor/templates/deployment.yaml @@ -93,16 +93,17 @@ spec: livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} diff --git a/helm/requestor/values.yaml b/helm/requestor/values.yaml index 7b196c724..2187029c8 100644 --- a/helm/requestor/values.yaml +++ b/helm/requestor/values.yaml @@ -230,6 +230,7 @@ args: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 80 # -- (int) The port number that the service exposes. port: - protocol: TCP diff --git a/helm/revproxy/Chart.yaml b/helm/revproxy/Chart.yaml index 43f6cc588..baf4fe07b 100644 --- a/helm/revproxy/Chart.yaml +++ b/helm/revproxy/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.43 +version: 0.1.44 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/revproxy/README.md b/helm/revproxy/README.md index 8d8279a1a..660b0686c 100644 --- a/helm/revproxy/README.md +++ b/helm/revproxy/README.md @@ -1,6 +1,6 @@ # revproxy -![Version: 0.1.43](https://img.shields.io/badge/Version-0.1.43-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.44](https://img.shields.io/badge/Version-0.1.44-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 revproxy @@ -73,6 +73,8 @@ A Helm chart for gen3 revproxy | netPolicy | map | `{"egressApps":["portal","sowerjob"],"ingressApps":["portal","sowerjob"]}` | Configuration for network policies created by this chart. Only relevant if "global.netPolicy.enabled" is set to true | | netPolicy.egressApps | array | `["portal","sowerjob"]` | List of apps that this app requires egress to | | netPolicy.ingressApps | array | `["portal","sowerjob"]` | List of app labels that require ingress to this service | +| nginx.resolver | string | `"kube-dns.kube-system.svc.cluster.local"` | | +| nginx.user | string | `"nginx"` | | | nodeSelector | map | `{}` | Node selector labels. | | partOf | string | `"Front-End"` | Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. | | podAnnotations | map | `{}` | Annotations to add to the pod. 
| @@ -95,7 +97,7 @@ A Helm chart for gen3 revproxy | revproxyElb | map | `{"gen3SecretsFolder":"Gen3Secrets","sslCert":"","targetPortHTTP":80,"targetPortHTTPS":443}` | Configuration for depricated revproxy service ELB. | | securityContext | map | `{}` | Container-level security context. | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"NodePort"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"NodePort"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"NodePort"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":""}` | Service account to use or create. | diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index c38743d93..61a798cd7 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -1,4 +1,4 @@ -user nginx; +user {{ .Values.nginx.user }}; worker_processes 4; pid /var/run/nginx.pid; @@ -38,6 +38,13 @@ http { port_in_redirect off; server_tokens off; + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + # For websockets map $http_upgrade $connection_upgrade { default upgrade; @@ -214,7 +221,7 @@ map $http_user_agent $loggable { # see https://www.nginx.com/blog/dns-service-discovery-nginx-plus/ # https://distinctplace.com/2017/04/19/nginx-resolver-explained/ # - resolver kube-dns.kube-system.svc.cluster.local ipv6=off; + resolver {{ .Values.nginx.resolver }} ipv6=off; set $access_token ""; set $csrf_check "ok-tokenauth"; diff --git a/helm/revproxy/templates/configMaps.yaml b/helm/revproxy/templates/configMaps.yaml index eb0d5655e..ff7b802bb 100644 --- a/helm/revproxy/templates/configMaps.yaml +++ b/helm/revproxy/templates/configMaps.yaml @@ -40,5 +40,5 @@ metadata: data: {{- range $path, $bytes := .Files.Glob "nginx/*" }} {{ ($a := split "/" $path)._1 }}: | - {{- $bytes | toString | nindent 4 }} + {{- tpl ($bytes | toString) $ | nindent 4 }} {{- end}} diff --git a/helm/revproxy/templates/deployment.yaml b/helm/revproxy/templates/deployment.yaml index e5a50b5c9..95b765b71 100644 --- a/helm/revproxy/templates/deployment.yaml +++ b/helm/revproxy/templates/deployment.yaml @@ -57,6 +57,8 @@ spec: topologyKey: "kubernetes.io/hostname" automountServiceAccountToken: false volumes: + - emptyDir: {} + name: nginx-logs - name: revproxy-conf configMap: name: revproxy-nginx-conf @@ -74,19 +76,20 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http - containerPort: 443 - containerPort: 6567 livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 5 periodSeconds: 3000 readinessProbe: httpGet: path: /_status - port: 80 + port: http resources: {{- toYaml .Values.resources | nindent 12 }} env: @@ -123,6 +126,8 @@ spec: key: base64Authz.txt optional: true volumeMounts: + - mountPath: /var/log/nginx + name: nginx-logs - name: "revproxy-conf" readOnly: true mountPath: "/etc/nginx/nginx.conf" diff --git a/helm/revproxy/templates/service.yaml 
b/helm/revproxy/templates/service.yaml index c752de6b8..71878a5e7 100644 --- a/helm/revproxy/templates/service.yaml +++ b/helm/revproxy/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: 80 + targetPort: http protocol: TCP name: http selector: diff --git a/helm/revproxy/values.yaml b/helm/revproxy/values.yaml index 5434c4a42..e485ec4ab 100644 --- a/helm/revproxy/values.yaml +++ b/helm/revproxy/values.yaml @@ -154,6 +154,7 @@ securityContext: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: NodePort + targetPort: 80 # -- (int) The port number that the service exposes. port: 80 @@ -254,3 +255,7 @@ extraServices: # - name: "protein-paint" # path: /protein-paint # serviceName: protein-paint + +nginx: + user: nginx + resolver: kube-dns.kube-system.svc.cluster.local \ No newline at end of file diff --git a/helm/sheepdog/Chart.yaml b/helm/sheepdog/Chart.yaml index 3edb55d31..6a759b52a 100644 --- a/helm/sheepdog/Chart.yaml +++ b/helm/sheepdog/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.29 +version: 0.1.30 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/helm/sheepdog/README.md b/helm/sheepdog/README.md index b16a9da11..45ace6fdb 100644 --- a/helm/sheepdog/README.md +++ b/helm/sheepdog/README.md @@ -1,6 +1,6 @@ # sheepdog -![Version: 0.1.29](https://img.shields.io/badge/Version-0.1.29-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.30](https://img.shields.io/badge/Version-0.1.30-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 Sheepdog Service @@ -65,13 +65,14 @@ A Helm chart for gen3 Sheepdog Service | global.publicDataSets | bool | `true` | Whether public datasets are enabled. | | global.revproxyArn | string | `"arn:aws:acm:us-east-1:123456:certificate"` | ARN of the reverse proxy certificate. | | global.tierAccessLevel | string | `"libre"` | Access level for tiers. acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. If omitted, by default common will be treated as `private` | -| image | map | `{"pullPolicy":"Always","repository":"quay.io/cdis/sheepdog","tag":"bug_auth-audience"}` | Docker image information. | +| image | map | `{"pullPolicy":"Always","repository":"quay.io/cdis/sheepdog","tag":"master"}` | Docker image information. | | image.pullPolicy | string | `"Always"` | Docker pull policy. | | image.repository | string | `"quay.io/cdis/sheepdog"` | Docker repository. | -| image.tag | string | `"bug_auth-audience"` | Overrides the image tag whose default is the chart appVersion. | +| image.tag | string | `"master"` | Overrides the image tag whose default is the chart appVersion. 
| | metricsEnabled | bool | `nil` | Whether Metrics are enabled. | | partOf | string | `"Core-Service"` | Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. | | podAnnotations | map | `{"gen3.io/network-ingress":"sheepdog"}` | Annotations to add to the pod | +| podSecurityContext | map | `{}` | Security context for the pod | | postgres | map | `{"database":null,"dbCreate":null,"dbRestore":false,"host":null,"password":null,"port":"5432","separate":false,"username":null}` | Postgres database configuration. If db does not exist in postgres cluster and dbCreate is set ot true then these databases will be created for you | | postgres.database | string | `nil` | Database name for postgres. This is a service override, defaults to - | | postgres.dbCreate | bool | `nil` | Whether the database should be created. Default to global.postgres.dbCreate | @@ -94,8 +95,9 @@ A Helm chart for gen3 Sheepdog Service | secrets | map | `{"awsAccessKeyId":null,"awsSecretAccessKey":null}` | Values for sheepdog secret. | | secrets.awsAccessKeyId | str | `nil` | AWS access key ID to access the db restore job S3 bucket. Overrides global key. | | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID to access the db restore job S3 bucket. Overrides global key. | +| securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":80,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | strategy | map | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"}` | Rolling update deployment strategy | diff --git a/helm/sheepdog/templates/deployment.yaml b/helm/sheepdog/templates/deployment.yaml index 2ec3ab2df..74d98408b 100644 --- a/helm/sheepdog/templates/deployment.yaml +++ b/helm/sheepdog/templates/deployment.yaml @@ -48,6 +48,8 @@ spec: {{- toYaml . 
| nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: - name: config-volume secret: @@ -119,12 +121,13 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - containerPort: 80 + - containerPort: {{ .Values.service.targetPort }} + name: http - containerPort: 443 livenessProbe: httpGet: path: /_status?timeout=20 - port: 80 + port: http initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 @@ -132,7 +135,7 @@ spec: initialDelaySeconds: 30 httpGet: path: /_status?timeout=2 - port: 80 + port: http # command: ["/bin/bash" ] # args: # - "-c" diff --git a/helm/sheepdog/templates/service.yaml b/helm/sheepdog/templates/service.yaml index eff84f425..accebdecc 100644 --- a/helm/sheepdog/templates/service.yaml +++ b/helm/sheepdog/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} protocol: TCP name: http selector: diff --git a/helm/sheepdog/values.yaml b/helm/sheepdog/values.yaml index 65a24e738..0e6eb9b55 100644 --- a/helm/sheepdog/values.yaml +++ b/helm/sheepdog/values.yaml @@ -173,7 +173,7 @@ image: # -- (string) Docker pull policy. pullPolicy: Always # -- (string) Overrides the image tag whose default is the chart appVersion. - tag: "bug_auth-audience" + tag: "master" # Environment Variables authNamespace: "" @@ -205,6 +205,7 @@ resources: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 80 # -- (int) The port number that the service exposes. port: 80 @@ -227,3 +228,19 @@ partOf: "Core-Service" selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl commonLabels: + + +# -- (map) Security context for the pod +podSecurityContext: + {} + # fsGroup: 2000 + +# -- (map) Security context for the containers in the pod +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 \ No newline at end of file diff --git a/helm/sower/Chart.yaml b/helm/sower/Chart.yaml index 3a0826bb8..9794929cf 100644 --- a/helm/sower/Chart.yaml +++ b/helm/sower/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.34 +version: 0.1.35 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/sower/README.md b/helm/sower/README.md index bfb84ea0c..c4ada911a 100644 --- a/helm/sower/README.md +++ b/helm/sower/README.md @@ -1,6 +1,6 @@ # sower -![Version: 0.1.34](https://img.shields.io/badge/Version-0.1.34-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.35](https://img.shields.io/badge/Version-0.1.35-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 sower @@ -103,7 +103,7 @@ A Helm chart for gen3 sower | secrets.awsSecretAccessKey | str | `nil` | AWS access key ID. Overrides global key. | | securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":8000,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":"sower-service-account"}` | Service account to use or create. | diff --git a/helm/sower/templates/deployment.yaml b/helm/sower/templates/deployment.yaml index 5d7f6f059..e94da3071 100644 --- a/helm/sower/templates/deployment.yaml +++ b/helm/sower/templates/deployment.yaml @@ -61,19 +61,19 @@ spec: value: {{ .Values.global.hostname }} ports: - name: http - containerPort: 8000 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: path: /_status - port: 8000 + port: http initialDelaySeconds: 5 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 8000 + port: http resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.nodeSelector }} diff --git a/helm/sower/values.yaml b/helm/sower/values.yaml index 346bb5d69..d11d666ad 100644 --- a/helm/sower/values.yaml +++ b/helm/sower/values.yaml @@ -148,6 +148,7 @@ securityContext: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 8000 # -- (int) The port number that the service exposes. port: 80 diff --git a/helm/ssjdispatcher/Chart.yaml b/helm/ssjdispatcher/Chart.yaml index 2d6a4dd77..585a09300 100644 --- a/helm/ssjdispatcher/Chart.yaml +++ b/helm/ssjdispatcher/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.31 +version: 0.1.32 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/ssjdispatcher/README.md b/helm/ssjdispatcher/README.md index 2ddd1feb7..1e4b9c8f9 100644 --- a/helm/ssjdispatcher/README.md +++ b/helm/ssjdispatcher/README.md @@ -1,6 +1,6 @@ # ssjdispatcher -![Version: 0.1.31](https://img.shields.io/badge/Version-0.1.31-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.32](https://img.shields.io/badge/Version-0.1.32-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 ssjdispatcher @@ -97,7 +97,7 @@ A Helm chart for gen3 ssjdispatcher | resources.requests.memory | string | `"128Mi"` | The amount of memory requested | | securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"port":80,"type":"ClusterIP"}` | Kubernetes service information. | +| service | map | `{"port":80,"targetPort":8000,"type":"ClusterIP"}` | Kubernetes service information. | | service.port | int | `80` | The port number that the service exposes. | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | | serviceAccount | map | `{"annotations":{},"create":true,"name":"ssjdispatcher-sa"}` | Service account to use or create. | diff --git a/helm/ssjdispatcher/templates/deployment.yaml b/helm/ssjdispatcher/templates/deployment.yaml index 85305e5f0..8baa9e64e 100644 --- a/helm/ssjdispatcher/templates/deployment.yaml +++ b/helm/ssjdispatcher/templates/deployment.yaml @@ -69,19 +69,19 @@ spec: key: job_images ports: - name: http - containerPort: 8000 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: path: /_status - port: 8000 + port: http initialDelaySeconds: 5 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 8000 + port: http resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.nodeSelector }} diff --git a/helm/ssjdispatcher/values.yaml b/helm/ssjdispatcher/values.yaml index f445a4834..a89d5ec23 100644 --- a/helm/ssjdispatcher/values.yaml +++ b/helm/ssjdispatcher/values.yaml @@ -128,6 +128,7 @@ securityContext: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 8000 # -- (int) The port number that the service exposes. port: 80 diff --git a/helm/wts/Chart.yaml b/helm/wts/Chart.yaml index 16ceea0d9..582066a88 100644 --- a/helm/wts/Chart.yaml +++ b/helm/wts/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.30 +version: 0.1.31 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/helm/wts/README.md b/helm/wts/README.md index ba122bdfa..695ae20ea 100644 --- a/helm/wts/README.md +++ b/helm/wts/README.md @@ -1,6 +1,6 @@ # wts -![Version: 0.1.30](https://img.shields.io/badge/Version-0.1.30-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) +![Version: 0.1.31](https://img.shields.io/badge/Version-0.1.31-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: master](https://img.shields.io/badge/AppVersion-master-informational?style=flat-square) A Helm chart for gen3 workspace token service @@ -98,7 +98,7 @@ A Helm chart for gen3 workspace token service | secrets.awsSecretAccessKey | str | `nil` | AWS secret access key ID. Overrides global key. | | securityContext | map | `{}` | Security context for the containers in the pod | | selectorLabels | map | `nil` | Will completely override the selectorLabels defined in the common chart's _label_setup.tpl | -| service | map | `{"httpPort":80,"httpsPort":443,"type":"ClusterIP"}` | Configuration for the service | +| service | map | `{"httpPort":80,"httpsPort":443,"targetPort":80,"type":"ClusterIP"}` | Configuration for the service | | service.httpPort | int | `80` | Port on which the service is exposed | | service.httpsPort | int | `443` | Secure port on which the service is exposed | | service.type | string | `"ClusterIP"` | Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". | diff --git a/helm/wts/templates/deployment.yaml b/helm/wts/templates/deployment.yaml index 779eee7e6..d98cd2e5c 100644 --- a/helm/wts/templates/deployment.yaml +++ b/helm/wts/templates/deployment.yaml @@ -79,18 +79,18 @@ spec: subPath: appcreds.json ports: - name: http - containerPort: 80 + containerPort: {{ .Values.service.targetPort }} protocol: TCP livenessProbe: httpGet: path: /_status - port: 80 + port: http failureThreshold: 10 initialDelaySeconds: 5 readinessProbe: httpGet: path: /_status - port: 80 + port: http env: - name: OIDC_CLIENT_ID valueFrom: diff --git a/helm/wts/values.yaml b/helm/wts/values.yaml index 1c32ed396..f9f9207c6 100644 --- a/helm/wts/values.yaml +++ b/helm/wts/values.yaml @@ -184,6 +184,7 @@ securityContext: service: # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". type: ClusterIP + targetPort: 80 # -- (int) Port on which the service is exposed httpPort: 80 # -- (int) Secure port on which the service is exposed From a352afd6d1ba1650c8c44a938b4e396b074bfb7d Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 1 Oct 2025 15:32:06 -0700 Subject: [PATCH 106/126] Update Helm values and portal configs for new features Updated container image tags for gearbox, guppy, and portal. Added command and args overrides to amanuensis Helm chart. Enhanced portal configuration with new filter, table, and field mapping options, including survival analysis and unit calculation configs. Modified database setup job template for improved host referencing. Disabled amanuensis and PostgreSQL persistence by default, and adjusted tier access limit. 
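For deployers, the new command/args override can be set from a site-specific values file; a minimal sketch, assuming the umbrella-chart layout used in pcdc-default-values.yaml (the args body is illustrative; the chart default remains the yaml_merge plus dockerrun sequence shipped in helm/amanuensis/values.yaml):

    # site-values.yaml -- sketch of overriding the new amanuensis command/args
    amanuensis:
      command: ["/bin/bash"]
      args:
        - "-c"
        - |
          # site-specific startup steps would go here (placeholder)
          bash /amanuensis/dockerrun.bash
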
--- gearbox-default-values.yaml | 4 +- helm/amanuensis/templates/deployment.yaml | 12 +- helm/amanuensis/values.yaml | 10 + helm/common/templates/_db_setup_job.tpl | 8 +- pcdc-default-values.yaml | 442 +++++++++++++++++++++- 5 files changed, 446 insertions(+), 30 deletions(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 5fc0e9e93..fff27ee43 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -451,7 +451,7 @@ gearbox: enabled: true image: repository: quay.io/pcdc/gearbox_be - tag: "pcdc_dev_2025-08-22T09_51_08-05_00" + tag: "pcdc_dev_2025-08-26T14_03_39-05_00" pullPolicy: Always podSecurityContext: # runAsNonRoot: true @@ -471,6 +471,8 @@ gearbox: gearbox-middleware: enabled: true + gearboxMiddlewareG3auto: + testing: False image: repository: quay.io/pcdc/gearbox-middleware tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" diff --git a/helm/amanuensis/templates/deployment.yaml b/helm/amanuensis/templates/deployment.yaml index 729674633..b8a3add1b 100644 --- a/helm/amanuensis/templates/deployment.yaml +++ b/helm/amanuensis/templates/deployment.yaml @@ -65,12 +65,14 @@ spec: port: http resources: {{- toYaml .Values.resources | nindent 12 }} - command: ["/bin/bash"] + {{- if .Values.command }} + command: + {{- toYaml .Values.command | nindent 12 }} + {{- end }} + {{- if .Values.args }} args: - - "-c" - - | - python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml /var/www/amanuensis/amanuensis-config.yaml - if [[ -f /amanuensis/dockerrun.bash ]]; then bash /amanuensis/dockerrun.bash; elif [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; else echo 'Error: Neither /amanuensis/dockerrun.bash nor /dockerrun.sh exists.' >&2; exit 1; fi + {{- toYaml .Values.args | nindent 12 }} + {{- end }} env: {{- toYaml .Values.env | nindent 12 }} volumeMounts: diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index f80a2da80..89eb4efae 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -368,3 +368,13 @@ amanuensisJobs: clearFilterSetCronjob: false dbMigrateJob: true validateFilterSetsJob: false + +# -- (list) Override the default Command to run in the container. +command: ["/bin/bash"] + +# -- (list) Default Command and arguments to run in the container. +args: + - "-c" + - | + python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml /var/www/amanuensis/amanuensis-config.yaml + if [[ -f /amanuensis/dockerrun.bash ]]; then bash /amanuensis/dockerrun.bash; elif [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; else echo 'Error: Neither /amanuensis/dockerrun.bash nor /dockerrun.sh exists.' 
>&2; exit 1; fi diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 4239c33b8..3b4cdbe99 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -56,7 +56,7 @@ spec: {{- if $.Values.global.dev }} valueFrom: secretKeyRef: - name: {{ .Release.Name }}-postgresql + name: {{ .Values.postgres.host | default (printf "%s-postgresql" .Release.Name) }} key: postgres-password optional: false {{- else if $.Values.global.postgres.externalSecret }} @@ -66,7 +66,7 @@ spec: key: password optional: false {{- else }} - value: {{ .Values.global.postgres.master.password | quote}} + value: {{ .Values.global.postgres.master.password | quote }} {{- end }} - name: PGUSER {{- if $.Values.global.postgres.externalSecret }} @@ -90,7 +90,7 @@ spec: {{- end }} - name: PGHOST {{- if $.Values.global.dev }} - value: "{{ .Release.Name }}-postgresql" + value: {{ .Values.postgres.host | default (printf "%s-postgresql" .Release.Name) }} {{- else if $.Values.global.postgres.externalSecret }} valueFrom: secretKeyRef: @@ -196,7 +196,7 @@ data: port: {{ $.Values.postgres.port | b64enc | quote }} password: {{ include "gen3.service-postgres" (dict "key" "password" "service" $.Chart.Name "context" $) | b64enc | quote }} {{- if $.Values.global.dev }} - host: {{ (printf "%s-%s" $.Release.Name "postgresql" ) | b64enc | quote }} + host: {{ ($.Values.postgres.host | default (printf "%s-%s" $.Release.Name "postgresql") ) | b64enc | quote }} {{- else }} host: {{ ( $.Values.postgres.host | default ( $.Values.global.postgres.master.host)) | b64enc | quote }} {{- end }} diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index e4ac9c657..7a8e81d43 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -5,7 +5,7 @@ global: dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json authz_entity_name: "subject" tierAccessLevel: "granular" - tierAccessLimit: "5" + tierAccessLimit: "1" tls: cert: | -----BEGIN CERTIFICATE----- @@ -62,7 +62,7 @@ arborist: tag: "2025.07" amanuensis: - enabled: true + enabled: false image: repository: "quay.io/pcdc/amanuensis" tag: "2.26.2" @@ -82,6 +82,110 @@ fence: repository: "quay.io/pcdc/fence" tag: "helm-test" pullPolicy: Always + # -- (list) Volumes to attach to the container. + volumes: + - name: old-config-volume + secret: + secretName: "fence-secret" + - name: json-secret-volume + secret: + secretName: "fence-json-secret" + optional: true + - name: creds-volume + secret: + secretName: "fence-creds" + - name: config-helper + configMap: + name: config-helper + optional: true + - name: logo-volume + configMap: + name: "logo-config" + - name: config-volume + secret: + secretName: "fence-config" + - name: fence-google-app-creds-secret-volume + secret: + secretName: "fence-google-app-creds-secret" + - name: fence-google-storage-creds-secret-volume + secret: + secretName: "fence-google-storage-creds-secret" + - name: fence-jwt-keys + secret: + secretName: "fence-jwt-keys" + - name: privacy-policy + configMap: + name: "privacy-policy" + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + - name: amanuensis-jwt-keys + secret: + secretName: "amanuensis-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + optional: true + - name: config-volume-public + configMap: + name: "manifest-fence" + optional: true + + # -- (list) Volumes to mount to the container. 
+ volumeMounts: + - name: "old-config-volume" + readOnly: true + mountPath: "/var/www/fence/local_settings.py" + subPath: local_settings.py + - name: "json-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_credentials.json" + subPath: fence_credentials.json + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/fence/config_helper.py" + subPath: config_helper.py + - name: "logo-volume" + readOnly: true + mountPath: "/fence/fence/static/img/logo.svg" + subPath: "logo.svg" + - name: "privacy-policy" + readOnly: true + mountPath: "/fence/fence/static/privacy_policy.md" + subPath: "privacy_policy.md" + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config-secret.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_private_key.pem" + subPath: "jwt_private_key.pem" + - name: "amanuensis-jwt-keys" + readOnly: true + mountPath: "/amanuensis/jwt_public_key.pem" + subPath: "jwt_public_key.pem" + - name: "config-volume-public" + readOnly: true + mountPath: "/var/www/fence/fence-config-public.yaml" + subPath: fence-config-public.yaml + USER_YAML: | authz: @@ -300,8 +404,12 @@ guppy: enabled: true image: repository: "quay.io/pcdc/guppy" - tag: "1.10.3" + tag: "1.11.0" pullPolicy: "IfNotPresent" + # image: + # repository: "guppy" + # tag: "test" + # pullPolicy: "Never" authFilterField: "auth_resource_path" manifestservice: @@ -325,7 +433,7 @@ portal: #enabled: false image: repository: "quay.io/pcdc/windmill" - tag: "1.41.0" + tag: "1.43.0" pullPolicy: IfNotPresent resources: requests: @@ -486,8 +594,7 @@ portal: "age_at_censor_status", "medical_histories.medical_history", "medical_histories.medical_history_status", - "external_references.external_resource_name", - "biospecimen_status" + "external_references.external_resource_name" ] }, { @@ -565,7 +672,10 @@ portal: "subject_responses.interim_response", "subject_responses.response_method", "minimal_residual_diseases.mrd_result", - "minimal_residual_diseases.mrd_result_numeric" + "minimal_residual_diseases.mrd_result_numeric", + "minimal_residual_diseases.mrd_result_unit", + "minimal_residual_diseases.mrd_method", + "minimal_residual_diseases.mrd_sample_source" ] }, { @@ -602,7 +712,61 @@ portal: "stem_cell_transplants.sct_donor_relationship" ] } - ] + ], + "unitCalcConfig": { + "ageUnits": { + "quantity": "age", + "desiredUnit": "days", + "selectUnits": { "months": 30, "years": 365.25 } + }, + "calculatorMapping": { + "number": [ + "year_at_disease_phase", + "tumor_assessments.longest_diam_dim1", + "radiation_therapies.rt_dose", + "tumor_assessments.necrosis_pct", + "labs.lab_result_numeric" + ], + "age": [ + "age_at_censor_status", + "tumor_assessments.age_at_tumor_assessment", + "molecular_analysis.age_at_molecular_analysis", + "secondary_malignant_neoplasm.age_at_smn", + "radiation_therapies.age_at_rt_start", + "subject_responses.age_at_response" + ] + } + }, + 
"filterDependencyConfig": { + "relations": { + "molecular_abnormality": [ + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result" + ], + "tumor_site_state": [ + "tumor_assessments.tumor_state", + "tumor_assessments.tumor_site" + ], + "stage": ["stagings.stage_system", "stagings.stage"], + "mrd_result": [ + "minimal_residual_diseases.mrd_result_numeric", + "minimal_residual_diseases.mrd_result_unit" + ], + "lab_result": ["labs.lab_result_numeric", "labs.lab_result_unit"] + }, + "filterToRelation": { + "molecular_analysis.molecular_abnormality": "molecular_abnormality", + "molecular_analysis.molecular_abnormality_result": "molecular_abnormality", + "tumor_assessments.tumor_state": "tumor_site_state", + "tumor_assessments.tumor_site": "tumor_site_state", + "stagings.stage_system": "stage", + "stagings.stage": "stage", + "minimal_residual_diseases.mrd_result_numeric": "mrd_result", + "minimal_residual_diseases.mrd_result_unit": "mrd_result", + "labs.lab_result_numeric": "lab_result", + "labs.lab_result_unit": "lab_result" + } + } }, "projectId": "search", "graphqlField": "subject", @@ -640,8 +804,10 @@ portal: ] }, "patientIds": { - "filter": false, - "export": true + "filter": true, + "export": true, + "filterName": "subject_submitter_id", + "displayName": "Subject Submitter Ids" }, "survivalAnalysis": { "result": { @@ -681,6 +847,10 @@ portal: "field": "survival_characteristics.lkss_obfuscated", "name": "Last Known Survival Status (LKSS)" }, + { + "field": "age_at_censor_status", + "name": "Age At Censor Status (days)" + }, { "field": "medical_histories.medical_history", "name": "Medical History" @@ -693,10 +863,6 @@ portal: "field": "external_references.external_resource_name", "name": "External Resource Name" }, - { - "field": "biospecimen_status", - "name": "Biospecimen" - }, { "field": "histologies.histology", "name": "Histology" @@ -711,7 +877,7 @@ portal: }, { "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" + "name": "Age at Tumor Assessment (days)" }, { "field": "tumor_assessments.tumor_classification", @@ -851,7 +1017,7 @@ portal: }, { "field": "molecular_analysis.age_at_molecular_analysis", - "name": "Age at Molecular Analysis" + "name": "Age at Molecular Analysis (days)" }, { "field": "molecular_analysis.mitoses", @@ -883,7 +1049,7 @@ portal: }, { "field": "radiation_therapies.age_at_rt_start", - "name": "Age at Radiation Therapy" + "name": "Age at Radiation Therapy (days)" }, { "field": "radiation_therapies.rt_site", @@ -903,7 +1069,7 @@ portal: }, { "field": "subject_responses.age_at_response", - "name": "Age at Response" + "name": "Age at Response (days)" }, { "field": "subject_responses.tx_prior_response", @@ -929,13 +1095,25 @@ portal: "field": "minimal_residual_diseases.mrd_result_numeric", "name": "MRD Result Numeric" }, + { + "field": "minimal_residual_diseases.mrd_result_unit", + "name": "MRD Result Unit" + }, + { + "field": "minimal_residual_diseases.mrd_method", + "name": "MRD Method" + }, + { + "field": "minimal_residual_diseases.mrd_sample_source", + "name": "MRD Sample Source" + }, { "field": "subject_responses.necrosis", "name": "Necrosis" }, { "field": "secondary_malignant_neoplasm.age_at_smn", - "name": "Age at SMN" + "name": "Age at SMN (days)" }, { "field": "secondary_malignant_neoplasm.smn_site", @@ -995,6 +1173,230 @@ portal: "enabled": false }, "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" + }, + { + "id": 2, + "label": "data - survival", + 
"charts": { + "sex": { + "chartType": "bar", + "title": "Sex" + }, + "race": { + "chartType": "bar", + "title": "Race" + }, + "ethnicity": { + "chartType": "bar", + "title": "Ethnicity" + } + }, + "adminAppliedPreFilters": { + "consortium": { + "selectedValues": ["INSTRuCT"] + } + }, + "filters": { + "anchor": { + "field": "disease_phase", + "options": ["Initial Diagnosis", "Relapse"], + "tabs": ["Disease", "Molecular"], + "tooltip": "You can describe this filter here" + }, + "tabs": [ + { + "title": "Subject", + "fields": [ + "consortium", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + { + "title": "Disease", + "fields": [ + "histologies.histology", + "tumor_assessments.age_at_tumor_assessment", + "tumor_assessments.tumor_classification", + "tumor_assessments.tumor_site", + "tumor_assessments.longest_diam_dim1", + "tumor_assessments.invasiveness", + "tumor_assessments.nodal_clinical", + "tumor_assessments.nodal_pathology", + "tumor_assessments.parameningeal_extension", + "stagings.irs_group", + "stagings.tnm_finding" + ] + }, + { + "title": "Molecular", + "fields": [ + "molecular_analysis.anaplasia", + "molecular_analysis.anaplasia_extent", + "molecular_analysis.molecular_abnormality", + "molecular_analysis.molecular_abnormality_result", + "molecular_analysis.gene1", + "molecular_analysis.gene2" + ] + } + ] + }, + "projectId": "search", + "graphqlField": "subject", + "index": "", + "buttons": [ + { + "enabled": true, + "type": "export-to-pfb", + "title": "Export to PFB", + "leftIcon": "datafile", + "rightIcon": "download" + }, + { + "enabled": false, + "type": "data", + "title": "Download Data", + "leftIcon": "user", + "rightIcon": "download", + "fileName": "data.json", + "tooltipText": "You can only download data accessible to you" + } + ], + "table": { + "enabled": true, + "fields": [ + "external_references.external_links", + "consortium", + "data_contributor_id", + "subject_submitter_id", + "sex", + "race", + "ethnicity", + "survival_characteristics.lkss", + "survival_characteristics.age_at_lkss" + ] + }, + "patientIds": { + "filter": false, + "export": true + }, + "survivalAnalysis": { + "result": { + "pval": false, + "risktable": true, + "survival": true + } + }, + "guppyConfig": { + "dataType": "subject", + "nodeCountTitle": "Subjects", + "fieldMapping": [ + { + "field": "survival_characteristics.lkss", + "name": "Last Known Survival Status (LKSS)", + "tooltip": "test tooltip" + }, + { + "field": "survival_characteristics.age_at_lkss", + "name": "Age at LKSS" + }, + { + "field": "external_references.external_resource_name", + "name": "External Resource Name" + }, + { + "field": "tumor_assessments.age_at_tumor_assessment", + "name": "Age at Tumor Assessment" + }, + { + "field": "tumor_assessments.tumor_classification", + "name": "Tumor Classification" + }, + { + "field": "tumor_assessments.tumor_site", + "name": "Tumor Site" + }, + { + "field": "tumor_assessments.tumor_size", + "name": "Tumor Size" + }, + { + "field": "tumor_assessments.longest_diam_dim1", + "name": "Longest Diameter Dimension 1" + }, + { + "field": "tumor_assessments.invasiveness", + "name": "Invasiveness" + }, + { + "field": "tumor_assessments.nodal_clinical", + "name": "Nodal Clinical" + }, + { + "field": "tumor_assessments.nodal_pathology", + "name": "Nodal Pathology" + }, + { + "field": "tumor_assessments.parameningeal_extension", + "name": "Parameningeal Extension" + }, + { + "field": "histologies.histology", + "name": "Histology" + }, + 
{ + "field": "histologies.histology_grade", + "name": "Histology Grade" + }, + { + "field": "histologies.histology_inpc", + "name": "Histology Inpc" + }, + { + "field": "molecular_analysis.anaplasia", + "name": "Anaplasia" + }, + { + "field": "molecular_analysis.anaplasia_extent", + "name": "Anaplasia Extent" + }, + { + "field": "molecular_analysis.molecular_abnormality", + "name": "Molecular Abnormality" + }, + { + "field": "molecular_analysis.molecular_abnormality_result", + "name": "Molecular Abnormality Result" + }, + { + "field": "molecular_analysis.gene1", + "name": "Gene 1" + }, + { + "field": "molecular_analysis.gene2", + "name": "Gene 2" + }, + { + "field": "project_id", + "name": "Data Release Version" + }, + { + "field": "stagings.irs_group", + "name": "IRS Group" + }, + { + "field": "stagings.tnm_finding", + "name": "TNM Finding" + } + ] + }, + "dataRequests": { + "enabled": false + }, + "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" } ] } @@ -1074,7 +1476,7 @@ wts: postgresql: primary: persistence: - enabled: true + enabled: false size: 5Gi elasticsearch: From 8724358bef84bedf2f2b0bc940512bc34fa66b40 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 1 Oct 2025 15:34:00 -0700 Subject: [PATCH 107/126] Set jwt_public_key.pem secret as required in Fence config Changed the 'optional' field for the jwt_public_key.pem secret from true to false in pcdc-default-values.yaml, ensuring the key must be present for Fence to function properly. --- pcdc-default-values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 7a8e81d43..f30f4f38d 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -125,7 +125,7 @@ fence: items: - key: jwt_public_key.pem path: jwt_public_key.pem - optional: true + optional: false - name: config-volume-public configMap: name: "manifest-fence" From 35a136e1b1a6410d5c2190586f8a469e27074c56 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Wed, 1 Oct 2025 15:44:40 -0700 Subject: [PATCH 108/126] Add biospecimen_status field and update external references Added the 'biospecimen_status' field to the portal configuration in pcdc-default-values.yaml, including it in relevant field lists and table columns. Also updated several subject submitter_id values in pcdc_data/external/external_reference.json. Additionally, removed the 'data - survival' block from the portal configuration. 
--- pcdc-default-values.yaml | 235 +-------------------- pcdc_data/external/external_reference.json | 8 +- 2 files changed, 12 insertions(+), 231 deletions(-) diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index f30f4f38d..c477a7678 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -594,7 +594,8 @@ portal: "age_at_censor_status", "medical_histories.medical_history", "medical_histories.medical_history_status", - "external_references.external_resource_name" + "external_references.external_resource_name", + "biospecimen_status" ] }, { @@ -804,7 +805,7 @@ portal: ] }, "patientIds": { - "filter": true, + "filter": false, "export": true, "filterName": "subject_submitter_id", "displayName": "Subject Submitter Ids" @@ -863,6 +864,10 @@ portal: "field": "external_references.external_resource_name", "name": "External Resource Name" }, + { + "field": "biospecimen_status", + "name": "Biospecimen Availability" + }, { "field": "histologies.histology", "name": "Histology" @@ -1106,7 +1111,7 @@ portal: { "field": "minimal_residual_diseases.mrd_sample_source", "name": "MRD Sample Source" - }, + }, { "field": "subject_responses.necrosis", "name": "Necrosis" @@ -1173,230 +1178,6 @@ portal: "enabled": false }, "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" - }, - { - "id": 2, - "label": "data - survival", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - } - }, - "adminAppliedPreFilters": { - "consortium": { - "selectedValues": ["INSTRuCT"] - } - }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular"], - "tooltip": "You can describe this filter here" - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "stagings.irs_group", - "stagings.tnm_finding" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - "molecular_analysis.gene1", - "molecular_analysis.gene2" - ] - } - ] - }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": true, - "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - "survival_characteristics.age_at_lkss" - ] - }, - "patientIds": { - "filter": false, - "export": true - 
}, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } - }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)", - "tooltip": "test tooltip" - }, - { - "field": "survival_characteristics.age_at_lkss", - "name": "Age at LKSS" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": "tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "Histology Inpc" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "project_id", - "name": "Data Release Version" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - } - ] - }, - "dataRequests": { - "enabled": false - }, - "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" } ] } diff --git a/pcdc_data/external/external_reference.json b/pcdc_data/external/external_reference.json index d743f2984..3da4e2daa 100644 --- a/pcdc_data/external/external_reference.json +++ b/pcdc_data/external/external_reference.json @@ -9,7 +9,7 @@ "external_subject_url": "https://portal.gdc.cancer.gov/cases/4e824cfb-d887-57b3-bff2-95e2e7b4d410", "subjects": [ { - "submitter_id": "subject_misparse_arthrodynic" + "submitter_id": "subject_Oreodoxa_hendness" } ], "submitter_id": "external_reference_isomaltose_unmingling", @@ -25,7 +25,7 @@ "external_subject_url": "https://portal.gdc.cancer.gov/cases/448a7c70-73e8-5b2f-b226-83e4065dc6ef", "subjects": [ { - "submitter_id": "subject_acalycinous_indemnificatory" + "submitter_id": "subject_unadventurously_oturia" } ], "submitter_id": "external_reference_homemaking_antivibrator", @@ -41,7 +41,7 @@ "external_subject_url": "https://portal.kidsfirstdrc.org/participant/PT_72AZK0JR", "subjects": [ { - "submitter_id": "subject_palebelly_telfairic" + "submitter_id": "subject_springly_quartet" } ], "submitter_id": "external_reference_irreverendly_subtrifid", @@ -57,7 +57,7 @@ "external_subject_url": 
"https://portal.kidsfirstdrc.org/participant/PT_N5N59J9M", "subjects": [ { - "submitter_id": "subject_crystallology_Dacrydium" + "submitter_id": "subject_autoschediastically_contraflexure" } ], "submitter_id": "external_reference_communicative_syntactics", From b884a46916fe50ed3834c65d72920423449e1204 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 3 Oct 2025 12:12:16 -0700 Subject: [PATCH 109/126] Update Helm job and enable services with DB configs Set imagePullPolicy to IfNotPresent for the cleanup Helm hook job. Enabled multiple services (arborist, amanuensis, fence, peregrine, portal, revproxy, sheepdog, elasticsearch) and added or updated their Postgres password configurations. Increased global tierAccessLimit from 1 to 5 and enabled persistence for PostgreSQL primary. --- .../templates/cleanup-helm-hooks-job.yaml | 1 + pcdc-default-values.yaml | 40 +++++++++++++++++-- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/helm/gen3/templates/cleanup-helm-hooks-job.yaml b/helm/gen3/templates/cleanup-helm-hooks-job.yaml index fdf7cdc01..33cfeaa61 100644 --- a/helm/gen3/templates/cleanup-helm-hooks-job.yaml +++ b/helm/gen3/templates/cleanup-helm-hooks-job.yaml @@ -73,6 +73,7 @@ spec: containers: - name: cleanup image: bitnamisecure/kubectl:latest + imagePullPolicy: IfNotPresent command: - /bin/bash - -c diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index c477a7678..1e6ed66fb 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -5,7 +5,21 @@ global: dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json authz_entity_name: "subject" tierAccessLevel: "granular" - tierAccessLimit: "1" + tierAccessLimit: "5" + postgres: + # -- (bool) Whether the database create job should run. + dbCreate: true + # -- (string) Name of external secret of the postgres master credentials. 
Disabled if empty + externalSecret: "" + master: + # -- global postgres master username + username: postgres + # -- global postgres master password + password: + # -- global postgres master host + host: + # -- global postgres master port + port: "5432" tls: cert: | -----BEGIN CERTIFICATE----- @@ -57,18 +71,26 @@ global: -----END RSA PRIVATE KEY----- arborist: + enabled: true + postgres: + password: "arborist_thisisaweakpassword" image: repository: quay.io/pcdc/arborist tag: "2025.07" amanuensis: - enabled: false + enabled: true + postgres: + password: "amanuensis_thisisaweakpassword" image: repository: "quay.io/pcdc/amanuensis" tag: "2.26.2" pullPolicy: IfNotPresent fence: + enabled: true + postgres: + password: "fence_thisisaweakpassword" FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true @@ -425,12 +447,15 @@ pcdcanalysistools: tag: "1.10.1" peregrine: + enabled: true + postgres: + password: "peregrine_thisisaweakpassword" image: repository: quay.io/pcdc/peregrine tag: "1.4.1" portal: - #enabled: false + enabled: true image: repository: "quay.io/pcdc/windmill" tag: "1.43.0" @@ -1183,11 +1208,15 @@ portal: } revproxy: + enabled: true image: repository: quay.io/cdis/nginx tag: "2025.08" sheepdog: + enabled: true + postgres: + password: "sheepdog_thisisaweakpassword" dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json image: repository: quay.io/pcdc/sheepdog @@ -1255,12 +1284,15 @@ wts: pullPolicy: Always postgresql: + auth: + postgresPassword: "thisisaterriblepassword" primary: persistence: - enabled: false + enabled: true size: 5Gi elasticsearch: + enabled: true clusterName: gen3-elasticsearch maxUnavailable: 0 singleNode: true From a624c30b91fff43bdbdd46a69cb4f6a7af338f68 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 3 Oct 2025 12:19:01 -0700 Subject: [PATCH 110/126] Add default postgres passwords for services Introduced default postgres passwords for arborist, fence, and gearbox services in the configuration file to facilitate local development and testing. --- gearbox-default-values.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index fff27ee43..a12fc87dd 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -55,11 +55,15 @@ global: -----END RSA PRIVATE KEY----- arborist: + postgres: + password: "arborist_thisisaweakpassword" image: repository: quay.io/cdis/arborist tag: 2025.07 fence: + postgres: + password: "fence_thisisaweakpassword" FENCE_CONFIG: DEBUG: true MOCK_STORAGE: true @@ -448,6 +452,8 @@ revproxy: tag: 2025.08 gearbox: + postgres: + password: "gearbox_thisisaweakpassword" enabled: true image: repository: quay.io/pcdc/gearbox_be From 68a87bf8a4b1ca42f4f9b35754916fd4536d97ed Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 3 Oct 2025 12:28:09 -0700 Subject: [PATCH 111/126] Add default PostgreSQL configuration values Introduced default values for PostgreSQL authentication and persistence, including a sample password and storage size, to the gearbox-default-values.yaml file. 
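These values are intended for local development only; a production deployment can override them from a separate values file. A minimal sketch, assuming the value paths added below (the password and size are placeholders):

    # postgres-override.yaml -- sketch; substitute a generated secret in real use
    postgresql:
      auth:
        postgresPassword: "replace-with-a-strong-generated-password"
      primary:
        persistence:
          enabled: true
          size: 10Gi

Such a file would be passed alongside the defaults, e.g. with an extra -f postgres-override.yaml on the helm upgrade command (release name and chart path are deployment specific).
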
--- gearbox-default-values.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index a12fc87dd..898031721 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -488,6 +488,13 @@ gearbox-middleware: # runAsUser: 1000 # runAsGroup: 1000 +postgresql: + auth: + postgresPassword: "thisisaterriblepassword" + primary: + persistence: + enabled: true + size: 1Gi ######################################################################################## # DISABLED SERVICES # ######################################################################################## From fbf6d86b9357a143b0568ed2bd9aa85af7828d9c Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 3 Oct 2025 13:23:37 -0700 Subject: [PATCH 112/126] Set PostgreSQL image repository to bitnamilegacy/postgresql Updated the postgresql.image.repository field in both gearbox-default-values.yaml and pcdc-default-values.yaml to use the bitnamilegacy/postgresql image. This ensures consistency in the PostgreSQL image source across both configuration files. --- gearbox-default-values.yaml | 2 ++ pcdc-default-values.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 898031721..29f723a47 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -489,6 +489,8 @@ gearbox-middleware: # runAsGroup: 1000 postgresql: + image: + repository: bitnamilegacy/postgresql auth: postgresPassword: "thisisaterriblepassword" primary: diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 1e6ed66fb..e26d43041 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -1284,6 +1284,8 @@ wts: pullPolicy: Always postgresql: + image: + repository: bitnamilegacy/postgresql auth: postgresPassword: "thisisaterriblepassword" primary: From fba0363d6124af58504408cea6fa355d6c488ac3 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 3 Oct 2025 13:44:52 -0700 Subject: [PATCH 113/126] Add dev-only comments to weak password fields Inserted '#local Dev Only' comments above hardcoded weak passwords in postgres configurations across multiple services in gearbox-default-values.yaml and pcdc-default-values.yaml to clarify these values are for local development use only. 
--- gearbox-default-values.yaml | 4 ++++ pcdc-default-values.yaml | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 29f723a47..ac3dd90ab 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -55,6 +55,7 @@ global: -----END RSA PRIVATE KEY----- arborist: + #local Dev Only postgres: password: "arborist_thisisaweakpassword" image: @@ -62,6 +63,7 @@ arborist: tag: 2025.07 fence: + #local Dev Only postgres: password: "fence_thisisaweakpassword" FENCE_CONFIG: @@ -452,6 +454,7 @@ revproxy: tag: 2025.08 gearbox: + #local Dev Only postgres: password: "gearbox_thisisaweakpassword" enabled: true @@ -491,6 +494,7 @@ gearbox-middleware: postgresql: image: repository: bitnamilegacy/postgresql + #local Dev Only auth: postgresPassword: "thisisaterriblepassword" primary: diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index e26d43041..079fc20a1 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -72,6 +72,7 @@ global: arborist: enabled: true + #local Dev Only postgres: password: "arborist_thisisaweakpassword" image: @@ -80,6 +81,7 @@ arborist: amanuensis: enabled: true + #local Dev Only postgres: password: "amanuensis_thisisaweakpassword" image: @@ -89,6 +91,7 @@ amanuensis: fence: enabled: true + #local Dev Only postgres: password: "fence_thisisaweakpassword" FENCE_CONFIG: @@ -448,6 +451,7 @@ pcdcanalysistools: peregrine: enabled: true + #local Dev Only postgres: password: "peregrine_thisisaweakpassword" image: @@ -1215,6 +1219,7 @@ revproxy: sheepdog: enabled: true + #local Dev Only postgres: password: "sheepdog_thisisaweakpassword" dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json @@ -1286,6 +1291,7 @@ wts: postgresql: image: repository: bitnamilegacy/postgresql + #local Dev Only auth: postgresPassword: "thisisaterriblepassword" primary: From 4fd09d11d0c6edd0541b3f52d659af09747a0507 Mon Sep 17 00:00:00 2001 From: Jawad Qureshi Date: Mon, 6 Oct 2025 15:51:57 -0500 Subject: [PATCH 114/126] Add postgresql 15+ support in dbcreate job, fix metadata and fence cronjob to work with openshift --- helm/common/templates/_db_setup_job.tpl | 26 ++++++++------ .../fence-delete-expired-clients-cron.yaml | 20 +++-------- helm/metadata/README.md | 4 +-- helm/metadata/templates/deployment.yaml | 8 ++--- helm/metadata/values.yaml | 34 +++++++++---------- 5 files changed, 44 insertions(+), 48 deletions(-) diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index d637057ad..89d8c2b0c 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -151,19 +151,25 @@ spec: if psql -lqt | cut -d \| -f 1 | grep -qw $SERVICE_PGDB; then gen3_log_info "Database exists" PGPASSWORD=$SERVICE_PGPASS psql -d $SERVICE_PGDB -h $PGHOST -p $PGPORT -U $SERVICE_PGUSER -c "\conninfo" - - # Update secret to signal that db is ready, and services can start kubectl patch secret/{{ .Chart.Name }}-dbcreds -p '{"data":{"dbcreated":"dHJ1ZQo="}}' else - echo "database does not exist" - psql -tc "SELECT 1 FROM pg_database WHERE datname = '$SERVICE_PGDB'" | grep -q 1 || psql -c "CREATE DATABASE \"$SERVICE_PGDB\";" - gen3_log_info psql -tc "SELECT 1 FROM pg_user WHERE usename = '$SERVICE_PGUSER'" | grep -q 1 || psql -c "CREATE USER \"$SERVICE_PGUSER\" WITH PASSWORD '$SERVICE_PGPASS';" - psql -tc "SELECT 1 FROM pg_user WHERE usename = '$SERVICE_PGUSER'" | grep -q 1 || psql -c "CREATE USER 
\"$SERVICE_PGUSER\" WITH PASSWORD '$SERVICE_PGPASS';" - psql -c "GRANT ALL ON DATABASE \"$SERVICE_PGDB\" TO \"$SERVICE_PGUSER\" WITH GRANT OPTION;" - psql -d $SERVICE_PGDB -c "CREATE EXTENSION ltree; ALTER ROLE \"$SERVICE_PGUSER\" WITH LOGIN" - PGPASSWORD=$SERVICE_PGPASS psql -d $SERVICE_PGDB -h $PGHOST -p $PGPORT -U $SERVICE_PGUSER -c "\conninfo" + echo "Database does not exist — creating..." + psql -tc "SELECT 1 FROM pg_database WHERE datname = '$SERVICE_PGDB'" | grep -q 1 || \ + psql -c "CREATE DATABASE \"$SERVICE_PGDB\";" + psql -tc "SELECT 1 FROM pg_user WHERE usename = '$SERVICE_PGUSER'" | grep -q 1 || \ + psql -c "CREATE USER \"$SERVICE_PGUSER\" WITH PASSWORD '$SERVICE_PGPASS';" + + echo "Granting privileges to $SERVICE_PGUSER..." + psql -c "GRANT ALL PRIVILEGES ON DATABASE \"$SERVICE_PGDB\" TO \"$SERVICE_PGUSER\";" + psql -d $SERVICE_PGDB -c "ALTER SCHEMA public OWNER TO \"$SERVICE_PGUSER\";" + psql -d $SERVICE_PGDB -c "GRANT ALL ON SCHEMA public TO \"$SERVICE_PGUSER\";" + psql -d $SERVICE_PGDB -c "GRANT ALL ON ALL TABLES IN SCHEMA public TO \"$SERVICE_PGUSER\";" + psql -d $SERVICE_PGDB -c "ALTER ROLE \"$SERVICE_PGUSER\" WITH LOGIN;" - # Update secret to signal that db has been created, and services can start + echo "Creating ltree extension..." + psql -d $SERVICE_PGDB -c "CREATE EXTENSION IF NOT EXISTS ltree;" + + PGPASSWORD=$SERVICE_PGPASS psql -d $SERVICE_PGDB -h $PGHOST -p $PGPORT -U $SERVICE_PGUSER -c "\conninfo" kubectl patch secret/{{ .Chart.Name }}-dbcreds -p '{"data":{"dbcreated":"dHJ1ZQo="}}' fi {{- end}} diff --git a/helm/fence/templates/fence-delete-expired-clients-cron.yaml b/helm/fence/templates/fence-delete-expired-clients-cron.yaml index de3c214df..dc7aaef17 100644 --- a/helm/fence/templates/fence-delete-expired-clients-cron.yaml +++ b/helm/fence/templates/fence-delete-expired-clients-cron.yaml @@ -18,13 +18,10 @@ spec: labels: app: gen3job spec: + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 12 }} volumes: - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - - name: config-volume - secret: - secretName: "fence-config" + {{- toYaml .Values.volumes | nindent 10 }} containers: - name: fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" @@ -44,19 +41,12 @@ spec: optional: true {{- toYaml .Values.env | nindent 16 }} volumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config-secret.yaml" - subPath: fence-config.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py + {{- toYaml .Values.initVolumeMounts | nindent 12 }} command: ["/bin/bash"] args: - "-c" - | - python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml /var/www/fence/fence-config.yaml + python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-secret.yaml /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config.yaml if [[ "$slackWebHook" =~ ^http ]]; then fence-create client-delete-expired --slack-webhook $slackWebHook --warning-days 7 else diff --git a/helm/metadata/README.md b/helm/metadata/README.md index 7da7f2737..ffd4b5d29 100644 --- a/helm/metadata/README.md +++ b/helm/metadata/README.md @@ -87,7 +87,7 @@ A Helm chart for gen3 Metadata Service | initResources | map | `{"requests":{"memory":"100Mi"}}` | Resource limits for the init container. 
| | initResources.requests | map | `{"memory":"100Mi"}` | The maximum amount of resources that the container is allowed to use | | initResources.requests.memory | string | `"100Mi"` | The maximum amount of memory the container can use | -| initVolumeMounts | list | `[{"mountPath":"/src/.env","name":"config-volume-g3auto","readOnly":true,"subPath":"metadata.env"},{"mountPath":"/mds/.env","name":"config-volume-g3auto","readOnly":true,"subPath":"metadata.env"}]` | Volumes to mount to the init container. | +| initVolumeMounts | list | `nil` | Volumes to mount to the init container. | | metricsEnabled | bool | `nil` | Whether Metrics are enabled. | | partOf | string | `"Discovery-Tab"` | Label to help organize pods and their use. Any value is valid, but use "_" or "-" to divide words. | | postgres | map | `{"database":null,"dbCreate":null,"dbRestore":false,"host":null,"password":null,"port":"5432","separate":false,"username":null}` | Postgres database configuration. If db does not exist in postgres cluster and dbCreate is set ot true then these databases will be created for you | @@ -120,4 +120,4 @@ A Helm chart for gen3 Metadata Service | strategy.rollingUpdate.maxSurge | int | `1` | Number of additional replicas to add during rollout. | | strategy.rollingUpdate.maxUnavailable | int | `0` | Maximum amount of pods that can be unavailable during the update. | | useAggMds | bool | `"False"` | Set to true to aggregate metadata from multiple other Metadata Service instances. | -| volumeMounts | list | `[{"mountPath":"/src/.env","name":"config-volume-g3auto","readOnly":true,"subPath":"metadata.env"},{"mountPath":"/mds/.env","name":"config-volume-g3auto","readOnly":true,"subPath":"metadata.env"},{"mountPath":"/aggregate_config.json","name":"config-volume","readOnly":true,"subPath":"aggregate_config.json"}]` | Volumes to mount to the container. | +| volumeMounts | list | `[{"mountPath":"/aggregate_config.json","name":"config-volume","readOnly":true,"subPath":"aggregate_config.json"}]` | Volumes to mount to the container. | diff --git a/helm/metadata/templates/deployment.yaml b/helm/metadata/templates/deployment.yaml index 8bc95ee1f..878e8b5e4 100644 --- a/helm/metadata/templates/deployment.yaml +++ b/helm/metadata/templates/deployment.yaml @@ -46,9 +46,9 @@ spec: securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: - - name: config-volume-g3auto - secret: - secretName: metadata-g3auto + # - name: config-volume-g3auto + # secret: + # secretName: metadata-g3auto - name: config-volume configMap: name: agg-mds-config @@ -128,7 +128,6 @@ spec: - name: {{ .Values.initContainerName }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- with .Values.initVolumeMounts }} env: - name: DB_HOST valueFrom: @@ -160,6 +159,7 @@ spec: name: metadata-dbcreds key: dbcreated optional: false + {{- with .Values.initVolumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} {{- end }} diff --git a/helm/metadata/values.yaml b/helm/metadata/values.yaml index b00cbb33a..e7da6976d 100644 --- a/helm/metadata/values.yaml +++ b/helm/metadata/values.yaml @@ -254,15 +254,15 @@ aggMdsConfig: | # -- (list) Volumes to mount to the container. 
volumeMounts: - - name: config-volume-g3auto - readOnly: true - mountPath: /src/.env - subPath: metadata.env + # - name: config-volume-g3auto + # readOnly: true + # mountPath: /src/.env + # subPath: metadata.env # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. - - name: config-volume-g3auto - readOnly: true - mountPath: /mds/.env - subPath: metadata.env + # - name: config-volume-g3auto + # readOnly: true + # mountPath: /mds/.env + # subPath: metadata.env - name: config-volume readOnly: true mountPath: /aggregate_config.json @@ -284,15 +284,15 @@ resources: initContainerName: metadata-db-migrate # -- (list) Volumes to mount to the init container. initVolumeMounts: - - name: config-volume-g3auto - readOnly: true - mountPath: /src/.env - subPath: metadata.env - # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. - - name: config-volume-g3auto - readOnly: true - mountPath: /mds/.env - subPath: metadata.env + # - name: config-volume-g3auto + # readOnly: true + # mountPath: /src/.env + # subPath: metadata.env + # # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + # - name: config-volume-g3auto + # readOnly: true + # mountPath: /mds/.env + # subPath: metadata.env # -- (map) Resource limits for the init container. initResources: # -- (map) The maximum amount of resources that the container is allowed to use From 9df8f70594609a6da856432cf09f96f716d7b18c Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 17 Oct 2025 08:16:30 -0700 Subject: [PATCH 115/126] Parameterize nginx pid file and listen port in Helm chart Updated nginx.conf to use values from values.yaml for the pid file location and listen port. Added 'pidFile' to the nginx section in values.yaml to allow configuration of the pid file path. --- helm/revproxy/nginx/nginx.conf | 4 ++-- helm/revproxy/values.yaml | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/helm/revproxy/nginx/nginx.conf b/helm/revproxy/nginx/nginx.conf index 2115d2f7a..27bd7c9a3 100644 --- a/helm/revproxy/nginx/nginx.conf +++ b/helm/revproxy/nginx/nginx.conf @@ -1,6 +1,6 @@ user {{ .Values.nginx.user }}; worker_processes 4; -pid /var/run/nginx.pid; +pid {{ .Values.nginx.pidFile }}; load_module modules/ngx_http_js_module.so; load_module modules/ngx_http_perl_module.so; @@ -181,7 +181,7 @@ map $http_user_agent $loggable { } server { - listen 80; + listen {{ .Values.service.targetPort }}; # this is here for gearbox I believe location /login { diff --git a/helm/revproxy/values.yaml b/helm/revproxy/values.yaml index e485ec4ab..098b452b1 100644 --- a/helm/revproxy/values.yaml +++ b/helm/revproxy/values.yaml @@ -258,4 +258,5 @@ extraServices: nginx: user: nginx - resolver: kube-dns.kube-system.svc.cluster.local \ No newline at end of file + resolver: kube-dns.kube-system.svc.cluster.local + pidFile: /var/run/nginx.pid \ No newline at end of file From 5955b9b094bddcb43490c95ea8d1ce84f04d0f7a Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 17 Oct 2025 12:20:32 -0700 Subject: [PATCH 116/126] Remove duplicate securityContext in deployment spec Eliminates a redundant securityContext definition in the fence-deployment.yaml template, ensuring only a single securityContext is set for the pod spec. 
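[Editor's note — an illustrative sketch, not part of the patch: this is roughly what the deduplicated pod spec is expected to render to, assuming a hypothetical podSecurityContext in values.yaml; the field values below are placeholders, not the chart's real defaults.]

# Hypothetical values.yaml fragment (placeholder values):
podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000
  fsGroup: 1000

# Rendered pod spec after the fix: exactly one securityContext block,
# produced by a single `{{- toYaml .Values.podSecurityContext | nindent 8 }}`.
spec:
  enableServiceLinks: false
  serviceAccountName: fence        # placeholder service account name
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    fsGroup: 1000
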
--- helm/fence/templates/fence-deployment.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/helm/fence/templates/fence-deployment.yaml b/helm/fence/templates/fence-deployment.yaml index d56b23de3..3eac9adb1 100644 --- a/helm/fence/templates/fence-deployment.yaml +++ b/helm/fence/templates/fence-deployment.yaml @@ -38,8 +38,6 @@ spec: {{- include "common.extraLabels" . | nindent 8 }} spec: enableServiceLinks: false - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} serviceAccountName: {{ include "fence.serviceAccountName" . }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} From dfbc3a602dbb71194aa2a774f56ba8791b4f6ebd Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 20 Oct 2025 17:29:01 -0700 Subject: [PATCH 117/126] Refactor HPA templates and update autoscaling values Replaced custom HPA logic in amanuensis, gearbox, and gearbox-middleware Helm charts with a shared 'common.hpa' template for consistency and maintainability. Updated autoscaling configuration in values.yaml files: fixed typo 'minAvialable' to 'minAvailable', reduced maxReplicas from 100 to 10, and switched CPU/memory autoscaling from utilization percentage to average value. --- helm/amanuensis/templates/hpa.yaml | 31 +--------------------- helm/amanuensis/values.yaml | 8 +++--- helm/gearbox-middleware/templates/hpa.yaml | 31 +--------------------- helm/gearbox-middleware/values.yaml | 8 +++--- helm/gearbox/templates/hpa.yaml | 31 +--------------------- helm/gearbox/values.yaml | 8 +++--- 6 files changed, 15 insertions(+), 102 deletions(-) diff --git a/helm/amanuensis/templates/hpa.yaml b/helm/amanuensis/templates/hpa.yaml index b12e046e8..c3dee2ad8 100644 --- a/helm/amanuensis/templates/hpa.yaml +++ b/helm/amanuensis/templates/hpa.yaml @@ -1,32 +1,3 @@ {{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: amanuensis-deployment - labels: - {{- include "amanuensis.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: amanuensis-deployment - minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} - maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} - metrics: - {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} - {{- end }} - {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} - type: Utilization - {{- end }} +{{ include "common.hpa" . }} {{- end }} \ No newline at end of file diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index 89eb4efae..a5989192d 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -82,7 +82,7 @@ global: # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. 
pdb: false # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. - minAvialable: 1 + minAvailable: 1 # -- (map) Kubernetes configuration crossplane: # -- (bool) Set to true if deploying to AWS and want to use crossplane for AWS resources. @@ -102,9 +102,9 @@ global: autoscaling: enabled: false minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 + maxReplicas: 10 + averageCPUValue: 500m + averageMemoryValue: 500Mi # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: {} diff --git a/helm/gearbox-middleware/templates/hpa.yaml b/helm/gearbox-middleware/templates/hpa.yaml index 765725d72..c3dee2ad8 100644 --- a/helm/gearbox-middleware/templates/hpa.yaml +++ b/helm/gearbox-middleware/templates/hpa.yaml @@ -1,32 +1,3 @@ {{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: gearbox-middleware-deployment - labels: - {{- include "gearbox-middleware.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: gearbox-middleware-deployment - minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} - maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} - metrics: - {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} - {{- end }} - {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} - type: Utilization - {{- end }} +{{ include "common.hpa" . }} {{- end }} \ No newline at end of file diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index 866028e95..9a42e0504 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -28,7 +28,7 @@ global: # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: false # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. - minAvialable: 1 + minAvailable: 1 # -- (map) External Secrets settings. externalSecrets: # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override any gearbox-middleware secrets you have deployed. 
@@ -54,9 +54,9 @@ global: autoscaling: enabled: false minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 + maxReplicas: 10 + averageCPUValue: 500m + averageMemoryValue: 500Mi # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: {} diff --git a/helm/gearbox/templates/hpa.yaml b/helm/gearbox/templates/hpa.yaml index 081704e0e..c3dee2ad8 100644 --- a/helm/gearbox/templates/hpa.yaml +++ b/helm/gearbox/templates/hpa.yaml @@ -1,32 +1,3 @@ {{- if default .Values.global.autoscaling.enabled .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: gearbox-deployment - labels: - {{- include "gearbox.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: gearbox-deployment - minReplicas: {{ default .Values.global.autoscaling.minReplicas .Values.autoscaling.minReplicas }} - maxReplicas: {{ default .Values.global.autoscaling.maxReplicas .Values.autoscaling.maxReplicas }} - metrics: - {{- if default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ default .Values.global.autoscaling.targetCPUUtilizationPercentage .Values.autoscaling.targetCPUUtilizationPercentage}} - {{- end }} - {{- if default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - averageUtilization: {{ default .Values.global.autoscaling.targetMemoryUtilizationPercentage .Values.autoscaling.targetMemoryUtilizationPercentage }} - type: Utilization - {{- end }} +{{ include "common.hpa" . }} {{- end }} \ No newline at end of file diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index af7c8353a..4a9f31904 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -28,7 +28,7 @@ global: # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: false # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. - minAvialable: 1 + minAvailable: 1 # -- (map) External Secrets settings. externalSecrets: # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override any gearbox secrets you have deployed. @@ -54,9 +54,9 @@ global: autoscaling: enabled: false minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 + maxReplicas: 10 + averageCPUValue: 500m + averageMemoryValue: 500Mi # -- (map) Controls network policy settings netPolicy: From 91f22cd89b8bd8db1c5ff9338ec1627ef80dabde Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 24 Oct 2025 16:44:58 -0700 Subject: [PATCH 118/126] Add DB credentials to environment variables Introduced environment variables for database connection (DB_DATABASE, DB_HOST, DB_USER, DB_PASSWORD, DBREADY) sourced from the gearbox-dbcreds secret in values.yaml. This enables the application to securely access database credentials via Kubernetes secrets. 
--- helm/gearbox-middleware/values.yaml | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index 9a42e0504..dd7b71e50 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -160,6 +160,36 @@ terminationGracePeriodSeconds: 50 env: - name: GEN3_DEBUG value: "False" + - name: DB_DATABASE + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: database + optional: false + - name: DB_HOST + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: host + optional: false + - name: DB_USER + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: username + optional: false + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: password + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: dbcreated + optional: false # -- (list) Volumes to mount to the container. volumeMounts: From ac316d989356f7644a919ca7d18afa2270900b76 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 7 Nov 2025 14:23:51 -0600 Subject: [PATCH 119/126] update user yaml and fix admin logins --- gearbox-default-values.yaml | 269 +++--------------- .../templates/gearbox-middleware-creds.yaml | 2 +- helm/gearbox/templates/gearbox-creds.yaml | 3 +- 3 files changed, 42 insertions(+), 232 deletions(-) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index ac3dd90ab..62792237e 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -4,55 +4,6 @@ global: portal_app: gitops dictionaryUrl: https://pcdc-gen3-dictionaries.s3.amazonaws.com/pcdc-schema-demo-amia.json authz_entity_name: "subject" - tls: - cert: | - -----BEGIN CERTIFICATE----- - MIIDDTCCAfWgAwIBAgIQcMmHCSPIuchREDNi1OpQ5DANBgkqhkiG9w0BAQsFADAP - MQ0wCwYDVQQDEwRnZW4zMB4XDTI0MDMyNTIyMDgwNFoXDTI1MDMyNTIyMDgwNFow - FDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB - CgKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u6bgbztSg - 9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0bhfGlwmt/ - gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lHzPefEQoU - p4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e8rRg5KWA - N7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KKriN+7492 - 38Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABo2AwXjAOBgNVHQ8BAf8EBAMCBaAw - HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHwYD - VR0jBBgwFoAUIK7MtOCIs/DygzZ1/vR3ieAwplAwDQYJKoZIhvcNAQELBQADggEB - AIWgFxpLfZ9LUc90qdiC2yHaLKyTN/LMdkUjw82ULVLYJ0ot0JJaJvO4iDFe/Ra9 - t13LUhcE+W4ChentUHvwYKO5zghf4UtiryM8/wqmcZ03xyswdVaKuk5Ov39/9sTJ - 6rfzMpf3mJZDO6JYC475TCQQ3hKAUUzOiFg41CMeqAy9vn0zgBk58IzZmruvdn43 - YH6N/ooqVTj3CnkmVkWoB4zBjDzX9DuxpYvqI3seD7qLtXK2cm2X+Pqv90UoPsB/ - XegALjODFpTbN5Scvbpb3npXEKbvR7X9+xy7BbVYD2K0FQ9+S1UTU8Rz7Dh9SDHM - Ixy5W9o6gVFhB5mxceOxKNc= - -----END CERTIFICATE----- - key: | - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u - 6bgbztSg9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0b - hfGlwmt/gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lH - zPefEQoUp4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e - 8rRg5KWAN7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KK - riN+749238Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABAoIBAG+AhfWcZncSuHjE - 1SfDCRRfeBtexqS6ygrCAn9UPDfRFWW1tfkuwP00kQekLlKCT03H9dvDPYmDIkvN - 1v23dkxjwn3qY5n4lbT9L2FXky6m1zfCgLEKzY5GcHA85QwVTPKYhw6NMTPwRJ2T - 4uDeJQKVih9fkN4Qoua2TnXvmyzNU49ffgFMJ0Ec7vNmS7MCUtlGec1Y0xKgTflt 
- yqhChpG2MBxdX8tLNgSC+lgRBZSzRaP/0oGZuV3FQ7W4fuXLNN8CdhSzHbVlbK+D - CO1f6nqZ8PZKJ/7SGwB2Q05EqscNAF3tl1dsGpnLqOLpnqJ2+f/H4W6/uB2tAILd - ySaC53kCgYEAwOHrNulo7HLgSXEs4/Ta9nLPoZxpDxkNKCRENDJCsoakQ2O33aL4 - mrHdSmxGqecpoCvkifx/ZCN073ZykVIoSY70+N7f79EytGjkAzDxEAnYMKSU7SSJ - TGA+c8Juqh6uvbMuJy/ZiQE6DZsweqhxopov7xSd89RIvNaBZdXq3QcCgYEA1fWJ - VHCEeQGl6eMtSWOfiADUnmOG70xlvmpzlD18pTTCIF7V1rFaAXjJl0ldI3mASJy/ - usiHZq54bUWcvof8DjI7YJ0OS8e7pmUZK9+O9fGTLIf8TIz6qq0PfERk+SyWGdAo - Z8HQMJBKWX809KPkJ9isd62wfREHVazfljxdL3sCgYBwxKTsWdKKSy9uQMjqDcHm - zIEwD24s8YyLp4hoq+nqzmVDMQ3SevG2H78tP9ighRIFHyRiuEkSlthLGIkrBUmg - mAAJcOSkJT7r01dbtkV6BwdqiQ65Bt9u0+Yvb8GbnIy1RAj7yDH6s8jpI45YaBrn - 4hWcRgWDBN3x6ceFbmf+CQKBgA5vwNJnvSiFCfLcF0Qqaqs8kxwUzxf6aasNd7r6 - 4xwqkSap/3e7A72xrrh8hMJOAm/j07QAr9In14xX9BmPB1zV2tfMARjv4yN5Ete4 - /+ZsZGfOzSFFKey2PKM/4ihF7+LR/sfxdeCw+7NKOAKBxHVD029H0u69ZWdMgNGc - RRVdAoGAFH7huA61ylOGh/W6IMU0wvJqg9SeT53JoZTr++0MS+0sdYoRGrq4RzSZ - bXKuvqZaSrXMRB9JZ72FfpjwZhDPZtNOXJV00K4yjZIui6h+TPsDk4lnxVSPYMpP - My/zrtJTCPM+Gqa6mhYTz4fyITv7igxqyECakrCa/Ct0SVDZbSI= - -----END RSA PRIVATE KEY----- arborist: #local Dev Only @@ -209,226 +160,76 @@ fence: authz: resources: - - name: 'gearbox_gateway' - - name: 'data_file' - description: 'data files, stored in S3' - - name: 'sower' - description: 'sower resource' - - name: workspace - description: jupyter notebooks - - name: analysis - description: analysis tool service - name: portal description: data portal service - - name: privacy - description: User privacy policy - name: 'services' subresources: - - name: 'sheepdog' + - name: 'gearbox' subresources: - - name: 'submission' - subresources: - - name: 'program' - - name: 'project' - - name: 'amanuensis' + - name: 'data-admin' + - name: 'data-manager' - name: 'fence' subresources: - name: 'admin' - - name: programs - subresources: - - name: pcdc policies: - - id: gearbox_admin - resource_paths: ['/gearbox_gateway'] - role_ids: ['gearbox_user'] - - id: 'data_upload' - description: 'upload raw data files to S3' + - id: base_access + resource_paths: ['/portal'] + role_ids: ['base_access'] + - id: 'services.gearbox-data-manager' + description: 'admin access to most gearbox data endpoints' resource_paths: - - /data_file + - /services/gearbox/data-manager role_ids: - - file_uploader - - id: 'services.amanuensis-admin' - description: 'admin access to amanuensis' + - 'data_manager' + - id: 'services.gearbox-data-admin' + description: 'admin access to gearbox data' role_ids: - - 'amanuensis_admin' + - 'data_admin' resource_paths: - - '/services/amanuensis' + - '/services/gearbox/data-admin' - id: 'services.fence-admin' description: 'admin access to fence' role_ids: - 'fence_admin' resource_paths: - '/services/fence/admin' - - id: workspace - description: be able to use workspace - resource_paths: - - /workspace - role_ids: - - workspace_user - - id: analysis - description: be able to use analysis tool service - resource_paths: - - /analysis - role_ids: - - analysis_user - - id: privacy_policy - description: User agreed on the privacy policy - resource_paths: - - /privacy - role_ids: - - reader - - id: indexd_admin - description: full access to indexd API - role_ids: - - indexd_admin - resource_paths: - - /programs - - description: be able to use sower job - id: sower - resource_paths: [/sower] - role_ids: [sower_user] - - id: 'services.sheepdog-admin' - description: 'CRUD access to programs and projects' - role_ids: - - 'sheepdog_admin' - resource_paths: - - '/services/sheepdog/submission/program' - - 
'/services/sheepdog/submission/project' - - id: all_programs_reader - role_ids: - - reader - - storage_reader - resource_paths: - - /programs - - id: login_no_access - role_ids: - - reader - resource_paths: - - /portal - - id: 'data_admin' - description: 'policy test, should write a policy per resource and assign to user in order to avoid duplicating policies' - role_ids: - - admin - resource_paths: - - /programs - - /programs/pcdc - roles: - - id: 'gearbox_user' + - id: fence_admin permissions: - - id: 'gearbox_access' + - id: fence_admin_permission action: - service: '*' - method: '*' - - id: 'file_uploader' - description: 'can upload data files' - permissions: - - id: 'file_upload' - action: - service: 'fence' - method: 'file_upload' - - id: 'amanuensis_admin' - description: 'can do admin work on project/data request' - permissions: - - id: 'amanuensis_admin_action' - action: - service: 'amanuensis' - method: '*' - - id: 'fence_admin' - description: 'can use the admin endpoint in Fence' - permissions: - - id: 'fence_admin_permission' - action: - service: 'fence' - method: '*' - - id: workspace_user - permissions: - - action: {method: access, service: jupyterhub} - id: workspace_access - - id: sower_user - permissions: - - action: {method: access, service: job} - id: sower_access - - id: analysis_user - permissions: - - action: {method: access, service: analysis} - id: analysis_access - # Sheepdog admin role - - id: 'sheepdog_admin' - description: 'sheepdog admin role for program project crud' - permissions: - - id: 'sheepdog_admin_action' - action: - service: 'sheepdog' + service: 'fence' method: '*' - - id: indexd_admin - description: full access to indexd API - permissions: - - id: indexd_admin - action: - service: indexd - method: '*' - - id: admin + - id: base_access permissions: - - id: admin + - id: gearbox_access action: service: '*' - method: '*' - - id: creator + method: 'read' + - id: data_manager permissions: - - id: creator + - id: gearbox_data_manager action: - service: '*' - method: create - - id: reader - permissions: - - id: reader - action: - service: '*' - method: read - - id: updater - permissions: - - id: updater - action: - service: '*' - method: update - - id: deleter - permissions: - - id: deleter - action: - service: '*' - method: delete - - id: storage_writer - permissions: - - id: storage_creator - action: - service: '*' - method: write-storage - - id: storage_reader + service: 'gearbox' + method: '*' + - id: data_admin permissions: - - id: storage_reader + - id: gearbox_data_admin action: service: '*' - method: read-storage - + method: '*' users: ### BEGIN INTERNS SECTION ### ### END INTERNS SECTION ### pmurdoch@uchicago.edu: admin: true policies: - - gearbox_admin - - data_upload - - workspace - - services.sheepdog-admin - - services.amanuensis-admin - - data_admin - - analysis - - privacy_policy - - login_no_access - - sower + - base_access + - services.gearbox-data-manager + - services.fence-admin + - services.gearbox-data-admin portal: enabled: true @@ -462,6 +263,10 @@ gearbox: repository: quay.io/pcdc/gearbox_be tag: "pcdc_dev_2025-08-26T14_03_39-05_00" pullPolicy: Always + # image: + # repository: gearbox-be + # tag: "test" + # pullPolicy: Never podSecurityContext: # runAsNonRoot: true # runAsUser: 1000 @@ -486,6 +291,10 @@ gearbox-middleware: repository: quay.io/pcdc/gearbox-middleware tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" pullPolicy: Always + # image: + # repository: gearbox-middleware + # tag: "test" + # pullPolicy: Never podSecurityContext: 
# runAsNonRoot: true # runAsUser: 1000 diff --git a/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml b/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml index 9c07b41e1..d10b5f2b6 100644 --- a/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml +++ b/helm/gearbox-middleware/templates/gearbox-middleware-creds.yaml @@ -7,7 +7,7 @@ type: Opaque stringData: {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-middleware-g3auto" }} {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} - base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ index $existingSecret.data "base64Authz.txt" | b64dec | quote }}{{ else }}{{ $randomPass | quote | b64enc }}{{ end }} + base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ (index $existingSecret.data "base64Authz.txt") | b64dec | quote }}{{ else }}{{ $randomPass | quote }}{{ end }} gearbox-middleware.env: | HOSTNAME={{ .Values.global.hostname }} {{ if and .Values.gearboxMiddlewareG3auto.awsaccesskey .Values.gearboxMiddlewareG3auto.awssecretkey }} diff --git a/helm/gearbox/templates/gearbox-creds.yaml b/helm/gearbox/templates/gearbox-creds.yaml index 229d159c5..7496d70f1 100644 --- a/helm/gearbox/templates/gearbox-creds.yaml +++ b/helm/gearbox/templates/gearbox-creds.yaml @@ -7,7 +7,8 @@ type: Opaque stringData: {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace "gearbox-g3auto" }} {{- $randomPass := printf "%s%s" "gateway:" (randAlphaNum 32) }} - base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ index $existingSecret.data "base64Authz.txt" | b64dec | quote }}{{ else }}{{ $randomPass | quote | b64enc }}{{ end }} + # existing secret data are base64; decode them before placing in stringData + base64Authz.txt: {{ if and $existingSecret (index $existingSecret.data "base64Authz.txt") }}{{ (index $existingSecret.data "base64Authz.txt") | b64dec | quote }}{{ else }}{{ $randomPass | quote }}{{ end }} gearbox.env: | HOSTNAME={{ .Values.global.hostname }} {{ if and .Values.gearboxG3auto.awsaccesskey .Values.gearboxG3auto.awssecretkey }} From 4cfe61c6a045a5019e08748480a8b9ada9472891 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 1 Dec 2025 10:29:56 -0800 Subject: [PATCH 120/126] changes for openshift --- dev-gearbox-default-values-openshift.yaml | 731 ++++++++++++++++++ gearbox-default-values.yaml | 379 ++++++--- .../arbrorsit-expired-access-cronjob.yaml | 2 + helm/arborist/templates/deployment.yaml | 2 +- helm/common/templates/_db_setup_job.tpl | 8 +- helm/common/templates/_jwt_key_pairs.tpl | 2 + .../templates/openshift-override-files.yaml | 44 ++ .../templates/deployment.yaml | 10 +- .../gearbox-middleware/templates/service.yaml | 2 +- helm/gearbox-middleware/values.yaml | 15 + helm/gearbox/templates/deployment.yaml | 10 +- helm/gearbox/templates/service.yaml | 2 +- helm/gearbox/values.yaml | 16 + .../templates/cleanup-helm-hooks-job.yaml | 9 +- helm/portal/templates/deployment.yaml | 20 +- pcdc-default-values.yaml | 5 + 16 files changed, 1128 insertions(+), 129 deletions(-) create mode 100644 dev-gearbox-default-values-openshift.yaml create mode 100644 helm/fence/templates/openshift-override-files.yaml diff --git a/dev-gearbox-default-values-openshift.yaml b/dev-gearbox-default-values-openshift.yaml new file mode 100644 index 000000000..9d1aef98d --- /dev/null +++ b/dev-gearbox-default-values-openshift.yaml @@ -0,0 +1,731 @@ +# Global configuration 
+global: + portal_app: gitops + authz_entity_name: "subject" + + # -- (map) Pod security context settings + compatibility: + # -- (map) OpenShift Settings + openshift: + # -- (bool) Set to force if deploying to OpenShift + adaptSecurityContext: force + + +arborist: + enabled: false + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m + #local Dev Only + # postgres: + # password: "arborist_thisisaweakpassword" + image: + # repository: quay.io/pcdc/arborist + # tag: 2025.09 + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/arborist" + tag: "latest" + pullPolicy: Always + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 + +fence: + #local Dev Only + # postgres: + # password: "fence_thisisaweakpassword" + FENCE_CONFIG: + DEBUG: true + MOCK_STORAGE: true + #fill in + AMANUENSIS_PUBLIC_KEY_PATH: "/fence/keys/key/jwt_public_key.pem" + MOCK_GOOGLE_AUTH: true + mock_default_user: "test@example.com" + ENABLE_VISA_UPDATE_CRON: false + ENABLE_DELETE_EXPIRED_CLIENTS: false + ENABLE_FENCE_DEPLOYMENT: true + ENABLE_PRESIGNED_URL_FENCE: false + USER_YAML_ENABLED: false + volumes: + - name: old-config-volume + secret: + secretName: "fence-secret" + - name: json-secret-volume + secret: + secretName: "fence-json-secret" + optional: true + - name: creds-volume + secret: + secretName: "fence-creds" + - name: config-helper + configMap: + name: config-helper + optional: true + - name: logo-volume + configMap: + name: "logo-config" + - name: config-volume + secret: + secretName: "fence-config" + - name: fence-google-app-creds-secret-volume + secret: + secretName: "fence-google-app-creds-secret" + - name: fence-google-storage-creds-secret-volume + secret: + secretName: "fence-google-storage-creds-secret" + - name: fence-jwt-keys + secret: + secretName: "fence-jwt-keys" + - name: privacy-policy + configMap: + name: "privacy-policy" + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + optional: true + - name: config-volume-public + configMap: + name: "manifest-fence" + optional: true + - name: nginx-config + configMap: + name: nginx-config + items: + - key: nginx.conf + path: nginx.conf + + volumeMounts: + - name: "old-config-volume" + readOnly: true + mountPath: "/var/www/fence/local_settings.py" + subPath: local_settings.py + - name: "json-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_credentials.json" + subPath: fence_credentials.json + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/fence/config_helper.py" + subPath: config_helper.py + - name: "logo-volume" + readOnly: true + mountPath: "/fence/fence/static/img/logo.svg" + subPath: "logo.svg" + - name: "privacy-policy" 
+ readOnly: true + mountPath: "/fence/fence/static/privacy_policy.md" + subPath: "privacy_policy.md" + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_private_key.pem" + subPath: "jwt_private_key.pem" + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_public_key.pem" + subPath: "jwt_public_key.pem" + - name: "config-volume-public" + readOnly: true + mountPath: "/var/www/fence/fence-config-public.yaml" + subPath: fence-config-public.yaml + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + readOnly: true + + # -- (list) Volumes to mount to the init container. + + initVolumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "config-volume-public" + readOnly: true + mountPath: "/var/www/fence/fence-config-public.yaml" + subPath: fence-config-public.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. 
+ port: 80 + targetPort: 8080 + podSecurityContext: + runAsNonRoot: true + # runAsUser: 1000970000 + # runAsGroup: 1000970000 + # fsGroup: 1000970000 + supplementalGroups: + - 1000 + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m + image: + # repository: quay.io/pcdc/fence + # tag: "3.6.0" + # pullPolicy: IfNotPresent + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/fence" + tag: "latest" + pullPolicy: Always + USER_YAML: | + authz: + + resources: + - name: 'gearbox_gateway' + - name: 'data_file' + description: 'data files, stored in S3' + - name: 'sower' + description: 'sower resource' + - name: workspace + description: jupyter notebooks + - name: analysis + description: analysis tool service + - name: portal + description: data portal service + - name: privacy + description: User privacy policy + - name: 'services' + subresources: + - name: 'sheepdog' + subresources: + - name: 'submission' + subresources: + - name: 'program' + - name: 'project' + - name: 'amanuensis' + - name: 'fence' + subresources: + - name: 'admin' + - name: programs + subresources: + - name: pcdc + + policies: + - id: gearbox_admin + resource_paths: ['/gearbox_gateway'] + role_ids: ['gearbox_user'] + - id: 'data_upload' + description: 'upload raw data files to S3' + resource_paths: + - /data_file + role_ids: + - file_uploader + - id: 'services.amanuensis-admin' + description: 'admin access to amanuensis' + role_ids: + - 'amanuensis_admin' + resource_paths: + - '/services/amanuensis' + - id: 'services.fence-admin' + description: 'admin access to fence' + role_ids: + - 'fence_admin' + resource_paths: + - '/services/fence/admin' + - id: workspace + description: be able to use workspace + resource_paths: + - /workspace + role_ids: + - workspace_user + - id: analysis + description: be able to use analysis tool service + resource_paths: + - /analysis + role_ids: + - analysis_user + - id: privacy_policy + description: User agreed on the privacy policy + resource_paths: + - /privacy + role_ids: + - reader + - id: indexd_admin + description: full access to indexd API + role_ids: + - indexd_admin + resource_paths: + - /programs + - description: be able to use sower job + id: sower + resource_paths: [/sower] + role_ids: [sower_user] + - id: 'services.sheepdog-admin' + description: 'CRUD access to programs and projects' + role_ids: + - 'sheepdog_admin' + resource_paths: + - '/services/sheepdog/submission/program' + - '/services/sheepdog/submission/project' + - id: all_programs_reader + role_ids: + - reader + - storage_reader + resource_paths: + - /programs + - id: login_no_access + role_ids: + - reader + resource_paths: + - /portal + - id: 'data_admin' + description: 'policy test, should write a policy per resource and assign to user in order to avoid duplicating policies' + role_ids: + - admin + resource_paths: + - /programs + - /programs/pcdc + + + roles: + - id: 'gearbox_user' + permissions: + - id: 'gearbox_access' + action: + service: '*' + method: '*' + - id: 'file_uploader' + description: 'can upload 
data files' + permissions: + - id: 'file_upload' + action: + service: 'fence' + method: 'file_upload' + - id: 'amanuensis_admin' + description: 'can do admin work on project/data request' + permissions: + - id: 'amanuensis_admin_action' + action: + service: 'amanuensis' + method: '*' + - id: 'fence_admin' + description: 'can use the admin endpoint in Fence' + permissions: + - id: 'fence_admin_permission' + action: + service: 'fence' + method: '*' + - id: workspace_user + permissions: + - action: {method: access, service: jupyterhub} + id: workspace_access + - id: sower_user + permissions: + - action: {method: access, service: job} + id: sower_access + - id: analysis_user + permissions: + - action: {method: access, service: analysis} + id: analysis_access + # Sheepdog admin role + - id: 'sheepdog_admin' + description: 'sheepdog admin role for program project crud' + permissions: + - id: 'sheepdog_admin_action' + action: + service: 'sheepdog' + method: '*' + - id: indexd_admin + description: full access to indexd API + permissions: + - id: indexd_admin + action: + service: indexd + method: '*' + - id: admin + permissions: + - id: admin + action: + service: '*' + method: '*' + - id: creator + permissions: + - id: creator + action: + service: '*' + method: create + - id: reader + permissions: + - id: reader + action: + service: '*' + method: read + - id: updater + permissions: + - id: updater + action: + service: '*' + method: update + - id: deleter + permissions: + - id: deleter + action: + service: '*' + method: delete + - id: storage_writer + permissions: + - id: storage_creator + action: + service: '*' + method: write-storage + - id: storage_reader + permissions: + - id: storage_reader + action: + service: '*' + method: read-storage + + users: + ### BEGIN INTERNS SECTION ### + ### END INTERNS SECTION ### + pmurdoch@uchicago.edu: + admin: true + policies: + - gearbox_admin + - data_upload + - workspace + - services.sheepdog-admin + - services.amanuensis-admin + - data_admin + - analysis + - privacy_policy + - login_no_access + - sower + +portal: + enabled: false + image: + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-fe" + tag: "latest" + pullPolicy: Always + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + cpu: 1.0 + memory: 1024Mi + limits: + cpu: 2.0 + memory: 4096Mi + # -- (map) The maximum amount of resources that the container is allowed to use + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 + gitops: + json: | + { + "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" + } + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 + gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" + +revproxy: + enabled: true + nginx: + user: "nginx" + resolver: "dns-default.openshift-dns.svc.cluster.local" + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. 
+ port: 80 + targetPort: 8080 + image: + repository: quay.io/cdis/nginx + tag: 2025.09 + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (string) The amount of memory requested + memory: 12Mi + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m + +gearbox: + enabled: false + image: + # repository: quay.io/pcdc/gearbox_be + # tag: "pcdc_dev_2025-08-26T14_03_39-05_00" + # pullPolicy: Always + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-matching" + tag: "latest" + pullPolicy: Always + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 + volumes: + - name: config-volume + secret: + secretName: "gearbox-g3auto" + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem + optional: false + - name: nginx-config + configMap: + name: nginx-config + items: + - key: nginx.conf + path: nginx.conf + volumeMounts: + - name: "gearbox-middleware-jwt-keys" + readOnly: true + mountPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" + subPath: jwt_public_key.pem + - name: config-volume + readOnly: true + mountPath: /gearbox/.env + subPath: gearbox.env + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + readOnly: true + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m +gearbox-middleware: + enabled: false + gearboxMiddlewareG3auto: + testing: False + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 + image: + # repository: quay.io/pcdc/gearbox-middleware + # tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" + # pullPolicy: Always + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-middleware" + tag: "latest" + pullPolicy: Always + # -- (list) Volumes to attach to the container. 
+ volumes: + - name: config-volume + secret: + secretName: "gearbox-middleware-g3auto" + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys" + items: + - key: jwt_private_key.pem + path: jwt_private_key.pem + optional: false + - name: nginx-config + configMap: + name: nginx-config + items: + - key: nginx.conf + path: nginx.conf + volumeMounts: + - name: "gearbox-middleware-jwt-keys" + readOnly: true + mountPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" + subPath: jwt_private_key.pem + - name: config-volume + readOnly: true + mountPath: /gearbox-middleware/.env + subPath: gearbox-middleware.env + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + readOnly: true + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 + +postgresql: + image: + repository: bitnamilegacy/postgresql + primary: + networkPolicy: + enabled: false + readReplicas: + networkPolicy: + enabled: false + #local Dev Only + # auth: + # postgresPassword: "thisisaterriblepassword" + # primary: + # persistence: + # enabled: true + # size: 1Gi +######################################################################################## +# DISABLED SERVICES # +######################################################################################## +elasticsearch: + enabled: false + +amanuensis: + enabled: false + +guppy: + enabled: false + +manifestservice: + enabled: false + +pcdcanalysistools: + enabled: false + +peregrine: + enabled: false + +sheepdog: + enabled: false + +sower: + enabled: false + +wts: + enabled: false + +ambassador: + # -- (bool) Whether to deploy the ambassador subchart. + enabled: false + +argo-wrapper: + # -- (bool) Whether to deploy the argo-wrapper subchart. + enabled: false + +audit: + # -- (bool) Whether to deploy the audit subchart. + enabled: false + +aws-es-proxy: + enabled: false + +metadata: + # -- (bool) Whether to deploy the metadata subchart. + enabled: false + +pidgin: + # -- (bool) Whether to deploy the pidgin subchart. 
+ enabled: false + +indexd: + enabled: false + +hatchery: + enabled: false + +cohort-middleware: + enabled: false + +etl: + enabled: false + +gen3-network-policies: + enabled: false diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index ac3dd90ab..7fad7635e 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -1,78 +1,60 @@ +# Global configuration global: - dev: true - hostname: localhost + dev: false portal_app: gitops - dictionaryUrl: https://pcdc-gen3-dictionaries.s3.amazonaws.com/pcdc-schema-demo-amia.json authz_entity_name: "subject" - tls: - cert: | - -----BEGIN CERTIFICATE----- - MIIDDTCCAfWgAwIBAgIQcMmHCSPIuchREDNi1OpQ5DANBgkqhkiG9w0BAQsFADAP - MQ0wCwYDVQQDEwRnZW4zMB4XDTI0MDMyNTIyMDgwNFoXDTI1MDMyNTIyMDgwNFow - FDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB - CgKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u6bgbztSg - 9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0bhfGlwmt/ - gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lHzPefEQoU - p4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e8rRg5KWA - N7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KKriN+7492 - 38Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABo2AwXjAOBgNVHQ8BAf8EBAMCBaAw - HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHwYD - VR0jBBgwFoAUIK7MtOCIs/DygzZ1/vR3ieAwplAwDQYJKoZIhvcNAQELBQADggEB - AIWgFxpLfZ9LUc90qdiC2yHaLKyTN/LMdkUjw82ULVLYJ0ot0JJaJvO4iDFe/Ra9 - t13LUhcE+W4ChentUHvwYKO5zghf4UtiryM8/wqmcZ03xyswdVaKuk5Ov39/9sTJ - 6rfzMpf3mJZDO6JYC475TCQQ3hKAUUzOiFg41CMeqAy9vn0zgBk58IzZmruvdn43 - YH6N/ooqVTj3CnkmVkWoB4zBjDzX9DuxpYvqI3seD7qLtXK2cm2X+Pqv90UoPsB/ - XegALjODFpTbN5Scvbpb3npXEKbvR7X9+xy7BbVYD2K0FQ9+S1UTU8Rz7Dh9SDHM - Ixy5W9o6gVFhB5mxceOxKNc= - -----END CERTIFICATE----- - key: | - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u - 6bgbztSg9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0b - hfGlwmt/gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lH - zPefEQoUp4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e - 8rRg5KWAN7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KK - riN+749238Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABAoIBAG+AhfWcZncSuHjE - 1SfDCRRfeBtexqS6ygrCAn9UPDfRFWW1tfkuwP00kQekLlKCT03H9dvDPYmDIkvN - 1v23dkxjwn3qY5n4lbT9L2FXky6m1zfCgLEKzY5GcHA85QwVTPKYhw6NMTPwRJ2T - 4uDeJQKVih9fkN4Qoua2TnXvmyzNU49ffgFMJ0Ec7vNmS7MCUtlGec1Y0xKgTflt - yqhChpG2MBxdX8tLNgSC+lgRBZSzRaP/0oGZuV3FQ7W4fuXLNN8CdhSzHbVlbK+D - CO1f6nqZ8PZKJ/7SGwB2Q05EqscNAF3tl1dsGpnLqOLpnqJ2+f/H4W6/uB2tAILd - ySaC53kCgYEAwOHrNulo7HLgSXEs4/Ta9nLPoZxpDxkNKCRENDJCsoakQ2O33aL4 - mrHdSmxGqecpoCvkifx/ZCN073ZykVIoSY70+N7f79EytGjkAzDxEAnYMKSU7SSJ - TGA+c8Juqh6uvbMuJy/ZiQE6DZsweqhxopov7xSd89RIvNaBZdXq3QcCgYEA1fWJ - VHCEeQGl6eMtSWOfiADUnmOG70xlvmpzlD18pTTCIF7V1rFaAXjJl0ldI3mASJy/ - usiHZq54bUWcvof8DjI7YJ0OS8e7pmUZK9+O9fGTLIf8TIz6qq0PfERk+SyWGdAo - Z8HQMJBKWX809KPkJ9isd62wfREHVazfljxdL3sCgYBwxKTsWdKKSy9uQMjqDcHm - zIEwD24s8YyLp4hoq+nqzmVDMQ3SevG2H78tP9ighRIFHyRiuEkSlthLGIkrBUmg - mAAJcOSkJT7r01dbtkV6BwdqiQ65Bt9u0+Yvb8GbnIy1RAj7yDH6s8jpI45YaBrn - 4hWcRgWDBN3x6ceFbmf+CQKBgA5vwNJnvSiFCfLcF0Qqaqs8kxwUzxf6aasNd7r6 - 4xwqkSap/3e7A72xrrh8hMJOAm/j07QAr9In14xX9BmPB1zV2tfMARjv4yN5Ete4 - /+ZsZGfOzSFFKey2PKM/4ihF7+LR/sfxdeCw+7NKOAKBxHVD029H0u69ZWdMgNGc - RRVdAoGAFH7huA61ylOGh/W6IMU0wvJqg9SeT53JoZTr++0MS+0sdYoRGrq4RzSZ - bXKuvqZaSrXMRB9JZ72FfpjwZhDPZtNOXJV00K4yjZIui6h+TPsDk4lnxVSPYMpP - My/zrtJTCPM+Gqa6mhYTz4fyITv7igxqyECakrCa/Ct0SVDZbSI= - -----END RSA PRIVATE KEY----- - + hostname: 
"gearbox-ped-gearbox-p1.apps.bsd-openshift-prod.bsd.uchicago.edu" + # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. + pdb: true + # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. + minAvialable: 2 + # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + arborist: + enabled: false + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m #local Dev Only - postgres: - password: "arborist_thisisaweakpassword" + # postgres: + # password: "arborist_thisisaweakpassword" image: - repository: quay.io/cdis/arborist - tag: 2025.07 + # repository: quay.io/pcdc/arborist + # tag: 2025.09 + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/arborist" + tag: "latest" + pullPolicy: Always + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 fence: - #local Dev Only - postgres: - password: "fence_thisisaweakpassword" - FENCE_CONFIG: - DEBUG: true - MOCK_STORAGE: true - #fill in - AMANUENSIS_PUBLIC_KEY_PATH: "/fence/keys/key/jwt_public_key.pem" - MOCK_GOOGLE_AUTH: true - mock_default_user: "test@example.com" + enabled: true volumes: - name: old-config-volume secret: @@ -114,6 +96,16 @@ fence: configMap: name: "manifest-fence" optional: true + - name: nginx-config + configMap: + name: nginx-config + items: + - key: nginx.conf + path: nginx.conf + - name: openshift-fence-override-files + configMap: + name: openshift-fence-override-files + defaultMode: 0755 volumeMounts: - name: "old-config-volume" @@ -160,14 +152,22 @@ fence: readOnly: true mountPath: "/fence/keys/key/jwt_private_key.pem" subPath: "jwt_private_key.pem" - # - name: "fence-jwt-keys" - # readOnly: true - # mountPath: "/fence/keys/key/jwt_public_key.pem" - # subPath: "jwt_public_key.pem" - name: "config-volume-public" readOnly: true mountPath: "/var/www/fence/fence-config-public.yaml" subPath: fence-config-public.yaml + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + readOnly: true + - name: openshift-fence-override-files + mountPath: /fence/dockerrun.bash + subPath: dockerrun.bash + readOnly: true + - name: openshift-fence-override-files + mountPath: /fence/deployment/wsgi/gunicorn.conf.py + subPath: gunicorn.conf.py + readOnly: true # -- (list) Volumes to mount to the init container. 
@@ -193,17 +193,36 @@ fence: mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" subPath: fence_google_storage_creds_secret.json + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 + podSecurityContext: - # runAsNonRoot: true - # runAsUser: 1000 - # runAsGroup: 1000 - {} - # securityContext: - # allowPrivilegeEscalation: false - # runAsNonRoot: true + supplementalGroups: + - 1000 + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m image: - repository: quay.io/pcdc/fence - tag: "helm-test" + # repository: quay.io/pcdc/fence + # tag: "3.6.0" + # pullPolicy: IfNotPresent + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/fence" + tag: "latest" pullPolicy: Always USER_YAML: | authz: @@ -431,41 +450,89 @@ fence: - sower portal: - enabled: true + enabled: false image: - repository: quay.io/pcdc/gearbox_fe - tag: "dev" + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-fe" + tag: "latest" pullPolicy: Always + # -- (map) Resource requests and limits for the containers in the pod resources: + # -- (map) The amount of resources that the container requests requests: cpu: 1.0 + memory: 1024Mi + limits: + cpu: 2.0 + memory: 4096Mi + # -- (map) The maximum amount of resources that the container is allowed to use + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 gitops: json: | { "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" } - + podSecurityContext: + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" revproxy: enabled: true + # -- (int) Number of replicas for the deployment. + replicaCount: 2 + nginx: + user: "nginx" + resolver: "dns-default.openshift-dns.svc.cluster.local" + pidFile: "/tmp/nginx.pid" + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". 
+ type: ClusterIP + targetPort: 8080 image: repository: quay.io/cdis/nginx - tag: 2025.08 + tag: 2025.09 + # -- (map) Resource requests and limits for the containers in the pod + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (string) The amount of memory requested + memory: 12Mi + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m gearbox: - #local Dev Only - postgres: - password: "gearbox_thisisaweakpassword" - enabled: true + enabled: false image: - repository: quay.io/pcdc/gearbox_be - tag: "pcdc_dev_2025-08-26T14_03_39-05_00" + # repository: quay.io/pcdc/gearbox_be + # tag: "pcdc_dev_2025-08-26T14_03_39-05_00" + # pullPolicy: Always + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-matching" + tag: "latest" pullPolicy: Always + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 podSecurityContext: - # runAsNonRoot: true - # runAsUser: 1000 - # runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 volumes: - name: config-volume secret: @@ -477,30 +544,122 @@ gearbox: - key: jwt_public_key.pem path: jwt_public_key.pem optional: false - + - name: nginx-config + configMap: + name: nginx-config + items: + - key: nginx.conf + path: nginx.conf + volumeMounts: + - name: "gearbox-middleware-jwt-keys" + readOnly: true + mountPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" + subPath: jwt_public_key.pem + - name: config-volume + readOnly: true + mountPath: /gearbox/.env + subPath: gearbox.env + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + readOnly: true + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m gearbox-middleware: - enabled: true + enabled: false gearboxMiddlewareG3auto: testing: False + service: + # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". + type: ClusterIP + # -- (int) The port number that the service exposes. + port: 80 + targetPort: 8080 image: - repository: quay.io/pcdc/gearbox-middleware - tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" + # repository: quay.io/pcdc/gearbox-middleware + # tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" + # pullPolicy: Always + repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-middleware" + tag: "latest" pullPolicy: Always + # -- (list) Volumes to attach to the container. 
+ volumes: + - name: config-volume + secret: + secretName: "gearbox-middleware-g3auto" + - name: gearbox-middleware-jwt-keys + secret: + secretName: "gearbox-middleware-jwt-keys" + items: + - key: jwt_private_key.pem + path: jwt_private_key.pem + optional: false + - name: nginx-config + configMap: + name: nginx-config + items: + - key: nginx.conf + path: nginx.conf + volumeMounts: + - name: "gearbox-middleware-jwt-keys" + readOnly: true + mountPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" + subPath: jwt_private_key.pem + - name: config-volume + readOnly: true + mountPath: /gearbox-middleware/.env + subPath: gearbox-middleware.env + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + readOnly: true + resources: + # -- (map) The amount of resources that the container requests + requests: + # -- (string) The amount of memory requested + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m + # -- (map) The maximum amount of resources that the container is allowed to use + limits: + # -- (string) The maximum amount of memory the container can use + memory: 512Mi + # -- (string) The maximum amount of CPU the container can use + cpu: 500m podSecurityContext: - # runAsNonRoot: true - # runAsUser: 1000 - # runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000970000 + runAsGroup: 1000970000 + fsGroup: 1000970000 postgresql: + enabled: false image: repository: bitnamilegacy/postgresql - #local Dev Only - auth: - postgresPassword: "thisisaterriblepassword" primary: - persistence: - enabled: true - size: 1Gi + networkPolicy: + enabled: false + readReplicas: + networkPolicy: + enabled: false + #local Dev Only + # auth: + # postgresPassword: "thisisaterriblepassword" + # primary: + # persistence: + # enabled: true + # size: 1Gi ######################################################################################## # DISABLED SERVICES # ######################################################################################## @@ -562,3 +721,9 @@ hatchery: cohort-middleware: enabled: false + +etl: + enabled: false + +gen3-network-policies: + enabled: false diff --git a/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml b/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml index 12900598e..41c057fef 100644 --- a/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml +++ b/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml @@ -77,6 +77,8 @@ spec: name: arborist-dbcreds key: dbcreated optional: false + resources: + {{- toYaml .Values.resources | nindent 12 }} command: ["sh"] args: - "-c" diff --git a/helm/arborist/templates/deployment.yaml b/helm/arborist/templates/deployment.yaml index 3ad5d93e2..358a29146 100644 --- a/helm/arborist/templates/deployment.yaml +++ b/helm/arborist/templates/deployment.yaml @@ -81,7 +81,7 @@ spec: /go/src/github.com/uc-cdis/arborist/migrations/latest # run arborist - /go/src/github.com/uc-cdis/arborist/bin/arborist + /go/src/github.com/uc-cdis/arborist/bin/arborist --port 8080 env: {{- toYaml .Values.env | nindent 12 }} diff --git a/helm/common/templates/_db_setup_job.tpl b/helm/common/templates/_db_setup_job.tpl index 77b248e74..5d3287a6a 100644 --- a/helm/common/templates/_db_setup_job.tpl +++ b/helm/common/templates/_db_setup_job.tpl @@ -57,6 +57,8 @@ spec: image: quay.io/cdis/awshelper:master imagePullPolicy: Always command: ["/bin/bash", "-c"] + resources: + {{- toYaml .Values.resources | nindent 12 }} env: - name: PGPASSWORD {{- if 
$.Values.global.dev }} @@ -131,8 +133,8 @@ spec: #!/bin/bash set -e - source "${GEN3_HOME}/gen3/lib/utils.sh" - gen3_load "gen3/gen3setup" + #source "${GEN3_HOME}/gen3/lib/utils.sh" + #gen3_load "gen3/gen3setup" echo "PGHOST=$PGHOST" echo "PGPORT=$PGPORT" @@ -149,7 +151,7 @@ spec: >&2 echo "Postgres is up - executing command" if psql -lqt | cut -d \| -f 1 | grep -qw $SERVICE_PGDB; then - gen3_log_info "Database exists" + #gen3_log_info "Database exists" PGPASSWORD=$SERVICE_PGPASS psql -d $SERVICE_PGDB -h $PGHOST -p $PGPORT -U $SERVICE_PGUSER -c "\conninfo" kubectl patch secret/{{ .Chart.Name }}-dbcreds -p '{"data":{"dbcreated":"dHJ1ZQo="}}' else diff --git a/helm/common/templates/_jwt_key_pairs.tpl b/helm/common/templates/_jwt_key_pairs.tpl index bf9f3c877..61c2ba834 100644 --- a/helm/common/templates/_jwt_key_pairs.tpl +++ b/helm/common/templates/_jwt_key_pairs.tpl @@ -63,6 +63,8 @@ spec: optional: false - name: SERVICE value: {{ .Chart.Name }} + resources: + {{- toYaml .Values.resources | nindent 12 }} command: ["/bin/sh", "-c"] args: - | diff --git a/helm/fence/templates/openshift-override-files.yaml b/helm/fence/templates/openshift-override-files.yaml new file mode 100644 index 000000000..be87f67b5 --- /dev/null +++ b/helm/fence/templates/openshift-override-files.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: openshift-fence-override-files +data: + dockerrun.bash: |- + #!/bin/bash + # + # Kubernetes may mount jwt-keys as a tar ball + # + if [ -f /fence/jwt-keys.tar ]; then + ( + cd /fence + tar xvf jwt-keys.tar + if [ -d jwt-keys ]; then + mkdir -p keys + mv jwt-keys/* keys/ + fi + ) + fi + + if [ "${OPENSHIFT}" = "true" ]; then + /usr/bin/nginx + else + nginx + fi + poetry run gunicorn -c "/fence/deployment/wsgi/gunicorn.conf.py" + + gunicorn.conf.py: |- + wsgi_app = "deployment.wsgi.wsgi:application" + bind = "0.0.0.0:8000" + workers = 1 + preload_app = True + import os + # Set user/group to the current user's UID and GID, unless UID is 0 (root), then set to 'gen3' + if os.getuid() == 0: + user = "gen3" + group = "gen3" + else: + user = os.getuid() + group = os.getgid() + timeout = 300 + keepalive = 2 + keepalive_timeout = 5 \ No newline at end of file diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml index e9c1574f4..623a9f8ab 100644 --- a/helm/gearbox-middleware/templates/deployment.yaml +++ b/helm/gearbox-middleware/templates/deployment.yaml @@ -37,6 +37,8 @@ spec: {{- end }} spec: serviceAccountName: {{ include "gearbox-middleware.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.affinity }} affinity: {{- toYaml . 
| nindent 8 }} @@ -87,18 +89,20 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} ports: - - containerPort: 80 + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 10 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http initContainers: - name: wait-for-gearbox image: curlimages/curl:latest diff --git a/helm/gearbox-middleware/templates/service.yaml b/helm/gearbox-middleware/templates/service.yaml index 550734a1e..3b49b48d1 100644 --- a/helm/gearbox-middleware/templates/service.yaml +++ b/helm/gearbox-middleware/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: 80 + targetPort: http protocol: TCP name: http selector: diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index 866028e95..a8ac4e9ca 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -216,3 +216,18 @@ partOf: "Workspace-tab" selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl commonLabels: + +# -- (map) Security context for the pod +podSecurityContext: + {} + # fsGroup: 2000 + +# -- (map) Security context for the containers in the pod +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 \ No newline at end of file diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml index b72e53544..06db9ef6f 100644 --- a/helm/gearbox/templates/deployment.yaml +++ b/helm/gearbox/templates/deployment.yaml @@ -37,6 +37,8 @@ spec: {{- end }} spec: serviceAccountName: {{ include "gearbox.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} @@ -87,18 +89,20 @@ spec: resources: {{- toYaml .Values.resources | nindent 12 }} ports: - - containerPort: 80 + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP livenessProbe: httpGet: path: /_status - port: 80 + port: http initialDelaySeconds: 10 periodSeconds: 60 timeoutSeconds: 30 readinessProbe: httpGet: path: /_status - port: 80 + port: http initContainers: - name: gearbox-db-migrate image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" diff --git a/helm/gearbox/templates/service.yaml b/helm/gearbox/templates/service.yaml index cba3c01ff..92c7bd807 100644 --- a/helm/gearbox/templates/service.yaml +++ b/helm/gearbox/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: 80 + targetPort: http protocol: TCP name: http selector: diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index af7c8353a..7b173bbf9 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -145,6 +145,7 @@ service: type: ClusterIP # -- (int) The port number that the service exposes. port: 80 + targetPort: 80 # -- (map) Service account to use or create. 
serviceAccount: @@ -309,3 +310,18 @@ partOf: "Workspace-tab" selectorLabels: # -- (map) Will completely override the commonLabels defined in the common chart's _label_setup.tpl commonLabels: + +# -- (map) Security context for the pod +podSecurityContext: + {} + # fsGroup: 2000 + +# -- (map) Security context for the containers in the pod +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 diff --git a/helm/gen3/templates/cleanup-helm-hooks-job.yaml b/helm/gen3/templates/cleanup-helm-hooks-job.yaml index 33cfeaa61..7723d71c0 100644 --- a/helm/gen3/templates/cleanup-helm-hooks-job.yaml +++ b/helm/gen3/templates/cleanup-helm-hooks-job.yaml @@ -88,4 +88,11 @@ spec: # Clean up secrets created by hooks (if any) kubectl delete secrets -l app=gen3-created-by-hook - echo "Cleanup completed" \ No newline at end of file + echo "Cleanup completed" + resources: + requests: + memory: 12Mi + cpu: 100m + limits: + memory: 512Mi + cpu: 500m \ No newline at end of file diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml index 7f2d03b2b..1c71336f6 100644 --- a/helm/portal/templates/deployment.yaml +++ b/helm/portal/templates/deployment.yaml @@ -109,6 +109,7 @@ spec: - /bin/sh - -c - | + sleep infinity mkdir -p /tmp/repo cd /tmp/repo # Clone only the specific branch with minimal history and sparse checkout @@ -163,9 +164,10 @@ spec: - containerPort: {{ .Values.service.targetPort }} name: http - containerPort: 443 - # command: - # - /bin/bash - # - ./dockerStart.sh + command: + - /bin/sh + - -c + - sleep infinity env: - name: HOSTNAME value: revproxy-service @@ -274,12 +276,12 @@ spec: - name: "config-volume" mountPath: "/data-portal/data/config/pcdc.json" subPath: "gitops.json" - - name: "nginx-config" - mountPath: "/etc/nginx/conf.d/nginx.conf" - subPath: "nginx.conf" - - name: "nginx-config" - mountPath: "/etc/nginx/nginx.conf" - subPath: "main" + # - name: "nginx-config" + # mountPath: "/etc/nginx/conf.d/nginx.conf" + # subPath: "nginx.conf" + # - name: "nginx-config" + # mountPath: "/etc/nginx/nginx.conf" + # subPath: "main" - name: "config-volume" mountPath: "/data-portal/custom/logo/gitops-logo.png" subPath: "gitops-logo.png" diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml index 079fc20a1..0a5c9ac9f 100644 --- a/pcdc-default-values.yaml +++ b/pcdc-default-values.yaml @@ -101,6 +101,11 @@ fence: AMANUENSIS_PUBLIC_KEY_PATH: "/amanuensis/jwt_public_key.pem" MOCK_GOOGLE_AUTH: true mock_default_user: "test@example.com" + ENABLE_VISA_UPDATE_CRON: false + ENABLE_DELETE_EXPIRED_CLIENTS: false + ENABLE_FENCE_DEPLOYMENT: true + ENABLE_PRESIGNED_URL_FENCE: true + USER_YAML_ENABLED: true #LOGIN_REDIRECT_WHITELIST: ["https://localhost:9443/", "http://localhost:9443/"] podSecurityContext: {} image: From 42be0548e72a7cc5c180801232ffc9ac22452b56 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Mon, 15 Dec 2025 17:23:38 -0800 Subject: [PATCH 121/126] add changes to our services --- helm/amanuensis/Chart.yaml | 14 ++++++------- helm/amanuensis/templates/db-init.yaml | 6 ++++++ helm/amanuensis/templates/deployment.yaml | 3 +++ .../amanuensis/templates/external-secret.yaml | 2 +- helm/amanuensis/values.yaml | 13 ++++++++++-- helm/gearbox-middleware/Chart.yaml | 12 +++++------ .../templates/deployment.yaml | 3 +++ helm/gearbox-middleware/values.yaml | 8 +++++++ helm/gearbox/Chart.yaml | 2 +- helm/gearbox/templates/db-init.yaml | 9 +++++++- helm/gearbox/templates/deployment.yaml | 3 
+++ helm/gearbox/templates/external-secret.yaml | 6 +++++- helm/gearbox/values.yaml | 19 +++++++++++++++++ helm/pcdcanalysistools/Chart.yaml | 2 +- .../templates/deployment.yaml | 3 +++ helm/pcdcanalysistools/values.yaml | 21 ++++++++++++------- 16 files changed, 99 insertions(+), 27 deletions(-) diff --git a/helm/amanuensis/Chart.yaml b/helm/amanuensis/Chart.yaml index b8b7fe684..ca8fa95c2 100644 --- a/helm/amanuensis/Chart.yaml +++ b/helm/amanuensis/Chart.yaml @@ -24,10 +24,10 @@ version: 1.0.0 appVersion: "master" dependencies: -- name: common - version: 0.1.23 - repository: file://../common -- name: postgresql - version: 11.9.13 - repository: "https://charts.bitnami.com/bitnami" - condition: postgres.separate + - name: common + version: 0.1.28 + repository: file://../common + - name: postgresql + version: 11.9.13 + repository: "https://charts.bitnami.com/bitnami" + condition: postgres.separate diff --git a/helm/amanuensis/templates/db-init.yaml b/helm/amanuensis/templates/db-init.yaml index d99ca1b2e..e43084729 100644 --- a/helm/amanuensis/templates/db-init.yaml +++ b/helm/amanuensis/templates/db-init.yaml @@ -4,3 +4,9 @@ --- {{ include "common.db_setup_sa" . }} --- +{{- if and $.Values.global.externalSecrets.deploy (or $.Values.global.externalSecrets.pushSecret .Values.externalSecrets.pushSecret) }} +--- +{{ include "common.db-push-secret" . }} +--- +{{ include "common.secret.db.bootstrap" . }} +{{- end }} \ No newline at end of file diff --git a/helm/amanuensis/templates/deployment.yaml b/helm/amanuensis/templates/deployment.yaml index b8a3add1b..8b14daf1e 100644 --- a/helm/amanuensis/templates/deployment.yaml +++ b/helm/amanuensis/templates/deployment.yaml @@ -35,6 +35,9 @@ spec: {{- include "amanuensis.selectorLabels" . | nindent 8 }} {{- include "common.extraLabels" . | nindent 8 }} spec: + {{- if .Values.global.topologySpread.enabled }} + {{- include "common.TopologySpread" . | nindent 6 }} + {{- end }} enableServiceLinks: false securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} diff --git a/helm/amanuensis/templates/external-secret.yaml b/helm/amanuensis/templates/external-secret.yaml index 9f490e753..05bfb18c3 100644 --- a/helm/amanuensis/templates/external-secret.yaml +++ b/helm/amanuensis/templates/external-secret.yaml @@ -22,7 +22,7 @@ spec: #name of secret in secrets manager key: {{include "amanuensis-config" .}} --- -{{- if and (.Values.global.externalSecrets.deploy) (not .Values.global.externalSecrets.dbCreate) }} +{{- if and .Values.global.externalSecrets.deploy (not .Values.global.externalSecrets.createLocalK8sSecret) }} {{ include "common.externalSecret.db" . }} {{- end}} {{- end}} \ No newline at end of file diff --git a/helm/amanuensis/values.yaml b/helm/amanuensis/values.yaml index a5989192d..6a8c777af 100644 --- a/helm/amanuensis/values.yaml +++ b/helm/amanuensis/values.yaml @@ -41,7 +41,7 @@ global: postgres: # -- (bool) Whether the database should be created. - dbCreate: true + createLocalK8sSecret: false # -- (string) Name of external secret. Disabled if empty externalSecret: "" # -- (map) Master credentials to postgres. This is going to be the default postgres server being used for each service, unless each service specifies their own postgres @@ -105,7 +105,14 @@ global: maxReplicas: 10 averageCPUValue: 500m averageMemoryValue: 500Mi - + # -- (map) Karpenter topology spread configuration. + topologySpread: + # -- (bool) Whether to enable topology spread constraints for all subcharts that support it. 
+ enabled: false + # -- (string) The topology key to use for spreading. Defaults to "topology.kubernetes.io/zone". + topologyKey: "topology.kubernetes.io/zone" + # -- (int) The maxSkew to use for topology spread constraints. Defaults to 1. + maxSkew: 1 # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: {} @@ -118,6 +125,8 @@ externalSecrets: createK8sAmanuensisConfigSecret: false # -- (string) Will override the name of the aws secrets manager secret. Default is "amanuensis-config" amanuensisConfig: + # -- (bool) Whether to create the database and Secrets Manager secrets via PushSecret. + pushSecret: false # -- (string) Will override the name of the aws secrets manager secret. Default is "Values.global.environment-.Chart.Name-creds" dbcreds: diff --git a/helm/gearbox-middleware/Chart.yaml b/helm/gearbox-middleware/Chart.yaml index 111b74431..400358bec 100644 --- a/helm/gearbox-middleware/Chart.yaml +++ b/helm/gearbox-middleware/Chart.yaml @@ -24,9 +24,9 @@ version: 0.1.0 appVersion: "1.16.0" dependencies: -- name: common - version: 0.1.23 - repository: file://../common -- name: gearbox - version: 0.1.0 - repository: file://../gearbox \ No newline at end of file + - name: common + version: 0.1.28 + repository: file://../common + - name: gearbox + version: 0.1.0 + repository: file://../gearbox diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml index e9c1574f4..89d24825d 100644 --- a/helm/gearbox-middleware/templates/deployment.yaml +++ b/helm/gearbox-middleware/templates/deployment.yaml @@ -36,6 +36,9 @@ spec: {{- include "common.grafanaAnnotations" . | nindent 8 }} {{- end }} spec: + {{- if .Values.global.topologySpread.enabled }} + {{- include "common.TopologySpread" . | nindent 6 }} + {{- end }} serviceAccountName: {{ include "gearbox-middleware.serviceAccountName" . }} {{- with .Values.affinity }} affinity: diff --git a/helm/gearbox-middleware/values.yaml b/helm/gearbox-middleware/values.yaml index dd7b71e50..a6a3ee9f5 100644 --- a/helm/gearbox-middleware/values.yaml +++ b/helm/gearbox-middleware/values.yaml @@ -57,6 +57,14 @@ global: maxReplicas: 10 averageCPUValue: 500m averageMemoryValue: 500Mi + # -- (map) Karpenter topology spread configuration. + topologySpread: + # -- (bool) Whether to enable topology spread constraints for all subcharts that support it. + enabled: false + # -- (string) The topology key to use for spreading. Defaults to "topology.kubernetes.io/zone". + topologyKey: "topology.kubernetes.io/zone" + # -- (int) The maxSkew to use for topology spread constraints. Defaults to 1. + maxSkew: 1 # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: {} diff --git a/helm/gearbox/Chart.yaml b/helm/gearbox/Chart.yaml index 32d287fa0..318b0d7d2 100644 --- a/helm/gearbox/Chart.yaml +++ b/helm/gearbox/Chart.yaml @@ -25,7 +25,7 @@ appVersion: "1.16.0" dependencies: - name: common - version: 0.1.23 + version: 0.1.28 repository: file://../common - name: postgresql version: 11.9.13 diff --git a/helm/gearbox/templates/db-init.yaml b/helm/gearbox/templates/db-init.yaml index bdf813f7b..e43084729 100644 --- a/helm/gearbox/templates/db-init.yaml +++ b/helm/gearbox/templates/db-init.yaml @@ -2,4 +2,11 @@ --- {{ include "common.db_setup_job" . }} --- -{{ include "common.db_setup_sa" . 
}} \ No newline at end of file +{{ include "common.db_setup_sa" . }} +--- +{{- if and $.Values.global.externalSecrets.deploy (or $.Values.global.externalSecrets.pushSecret .Values.externalSecrets.pushSecret) }} +--- +{{ include "common.db-push-secret" . }} +--- +{{ include "common.secret.db.bootstrap" . }} +{{- end }} \ No newline at end of file diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml index b72e53544..080db68e9 100644 --- a/helm/gearbox/templates/deployment.yaml +++ b/helm/gearbox/templates/deployment.yaml @@ -36,6 +36,9 @@ spec: {{- include "common.grafanaAnnotations" . | nindent 8 }} {{- end }} spec: + {{- if .Values.global.topologySpread.enabled }} + {{- include "common.TopologySpread" . | nindent 6 }} + {{- end }} serviceAccountName: {{ include "gearbox.serviceAccountName" . }} {{- with .Values.affinity }} affinity: diff --git a/helm/gearbox/templates/external-secret.yaml b/helm/gearbox/templates/external-secret.yaml index 32e868322..4ce4d9235 100644 --- a/helm/gearbox/templates/external-secret.yaml +++ b/helm/gearbox/templates/external-secret.yaml @@ -16,4 +16,8 @@ spec: remoteRef: #name of secret in secrets manager key: {{include "gearbox-g3auto" .}} -{{- end }} \ No newline at end of file +{{- end }} +{{- if and .Values.global.externalSecrets.deploy (not .Values.global.externalSecrets.createLocalK8sSecret) }} +{{ include "common.externalSecret.db" . }} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/helm/gearbox/values.yaml b/helm/gearbox/values.yaml index 4a9f31904..ef5969e15 100644 --- a/helm/gearbox/values.yaml +++ b/helm/gearbox/values.yaml @@ -4,6 +4,17 @@ # Global configuration global: + # -- (map) External Secrets settings. + externalSecrets: + # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override secrets you have deployed. + deploy: false + # -- (bool) Will create the databases and store the creds in Kubernetes Secrets even if externalSecrets is deployed. Useful if you want to use ExternalSecrets for other secrets besides db secrets. + createLocalK8sSecret: false + # -- (string) Will deploy a separate External Secret Store for this service. + separateSecretStore: false + # -- (string) Will use a manually deployed clusterSecretStore if defined. + clusterSecretStoreRef: "" + # -- (map) AWS configuration aws: # -- (bool) Set to true if deploying to AWS. Controls ingress annotations. @@ -57,6 +68,14 @@ global: maxReplicas: 10 averageCPUValue: 500m averageMemoryValue: 500Mi + # -- (map) Karpenter topology spread configuration. + topologySpread: + # -- (bool) Whether to enable topology spread constraints for all subcharts that support it. + enabled: false + # -- (string) The topology key to use for spreading. Defaults to "topology.kubernetes.io/zone". + topologyKey: "topology.kubernetes.io/zone" + # -- (int) The maxSkew to use for topology spread constraints. Defaults to 1. 
+ maxSkew: 1 # -- (map) Controls network policy settings netPolicy: diff --git a/helm/pcdcanalysistools/Chart.yaml b/helm/pcdcanalysistools/Chart.yaml index 7b7fc61af..e2f860bae 100644 --- a/helm/pcdcanalysistools/Chart.yaml +++ b/helm/pcdcanalysistools/Chart.yaml @@ -25,5 +25,5 @@ appVersion: "master" dependencies: - name: common - version: 0.1.20 + version: 0.1.28 repository: file://../common diff --git a/helm/pcdcanalysistools/templates/deployment.yaml b/helm/pcdcanalysistools/templates/deployment.yaml index 7dfb913de..80174261b 100644 --- a/helm/pcdcanalysistools/templates/deployment.yaml +++ b/helm/pcdcanalysistools/templates/deployment.yaml @@ -43,6 +43,9 @@ spec: {{- include "common.grafanaAnnotations" . | nindent 8 }} {{- end }} spec: + {{- if .Values.global.topologySpread.enabled }} + {{- include "common.TopologySpread" . | nindent 6 }} + {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} diff --git a/helm/pcdcanalysistools/values.yaml b/helm/pcdcanalysistools/values.yaml index 1d68d15c1..4ab7a1545 100644 --- a/helm/pcdcanalysistools/values.yaml +++ b/helm/pcdcanalysistools/values.yaml @@ -1,7 +1,6 @@ # Default values for pcdcanalysistools. # This is a YAML-formatted file. # Declare variables to be passed into your templates. - # Global configuration global: # -- (map) AWS configuration @@ -19,7 +18,6 @@ global: externalSecretAwsCreds: # -- (bool) Whether the deployment is for development purposes. dev: true - postgres: # -- (bool) Whether the database should be created. dbCreate: true @@ -61,7 +59,7 @@ global: # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: false # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. - minAvialable: 1 + minAvailable: 1 # -- (map) External Secrets settings. externalSecrets: # -- (bool) Will use ExternalSecret resources to pull secrets from Secrets Manager instead of creating them locally. Be cautious as this will override any pcdcanalysistools secrets you have deployed. @@ -72,10 +70,17 @@ global: autoscaling: enabled: false minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - + maxReplicas: 10 + averageCPUValue: 500m + averageMemoryValue: 500Mi + # -- (map) Karpenter topology spread configuration. + topologySpread: + # -- (bool) Whether to enable topology spread constraints for all subcharts that support it. + enabled: false + # -- (string) The topology key to use for spreading. Defaults to "topology.kubernetes.io/zone". + topologyKey: "topology.kubernetes.io/zone" + # -- (int) The maxSkew to use for topology spread constraints. Defaults to 1. + maxSkew: 1 # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: {} @@ -84,6 +89,8 @@ metricsEnabled: # -- (map) External Secrets settings. externalSecrets: + # -- (bool) Whether to create the database and Secrets Manager secrets via PushSecret. + pushSecret: false # -- (string) Will override the name of the aws secrets manager secret. 
Default is "Values.global.environment-.Chart.Name-creds" dbcreds: From 11e63d6ec92498dcd333247b3d45129e56572b10 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Tue, 16 Dec 2025 15:12:24 -0800 Subject: [PATCH 122/126] add route and update variables --- gearbox-default-values.yaml | 27 ++++++++++---------- helm/revproxy/templates/openshift_route.yaml | 27 ++++++++++++++++++++ helm/revproxy/values.yaml | 24 +++++++++++++++-- 3 files changed, 62 insertions(+), 16 deletions(-) create mode 100644 helm/revproxy/templates/openshift_route.yaml diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index fcb708e5e..bb64007d5 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -12,9 +12,6 @@ global: autoscaling: enabled: true minReplicas: 2 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 arborist: enabled: false @@ -54,7 +51,7 @@ arborist: fsGroup: 1000970000 fence: - enabled: true + enabled: false volumes: - name: old-config-volume secret: @@ -348,6 +345,9 @@ revproxy: image: repository: quay.io/cdis/nginx tag: 2025.09 + autoscaling: + averageCPUValue: 300m + averageMemoryValue: 256Mi # -- (map) Resource requests and limits for the containers in the pod resources: # -- (map) The amount of resources that the container requests @@ -363,6 +363,14 @@ revproxy: # -- (string) The maximum amount of CPU the container can use cpu: 500m + # OpenShift Route configuration (replaces Ingress on OpenShift) + openshiftRoute: + enabled: true + # Leave host empty to auto-generate, or specify your desired hostname + host: "gearbox-ped-gearbox-p1.apps.bsd-openshift-prod.bsd.uchicago.edu" # Auto-generates: -.apps. + annotations: + openshift.io/host.generated: "true" + gearbox: enabled: false image: @@ -493,16 +501,6 @@ gearbox-middleware: runAsGroup: 1000970000 fsGroup: 1000970000 -postgresql: - enabled: false - image: - repository: bitnamilegacy/postgresql - primary: - networkPolicy: - enabled: false - readReplicas: - networkPolicy: - enabled: false #local Dev Only # auth: # postgresPassword: "thisisaterriblepassword" @@ -510,6 +508,7 @@ postgresql: # persistence: # enabled: true # size: 1Gi + ######################################################################################## # DISABLED SERVICES # ######################################################################################## diff --git a/helm/revproxy/templates/openshift_route.yaml b/helm/revproxy/templates/openshift_route.yaml new file mode 100644 index 000000000..ec6a05116 --- /dev/null +++ b/helm/revproxy/templates/openshift_route.yaml @@ -0,0 +1,27 @@ +{{- if .Values.openshiftRoute.enabled }} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "revproxy.fullname" . }} + labels: + {{- include "revproxy.labels" . 
| nindent 4 }} + {{- if .Values.openshiftRoute.annotations }} + annotations: + {{- toYaml .Values.openshiftRoute.annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.openshiftRoute.host }} + host: {{ .Values.openshiftRoute.host }} + {{- end }} + path: {{ .Values.openshiftRoute.path | default "/" }} + to: + kind: Service + name: revproxy-service + weight: 100 + port: + targetPort: {{ .Values.openshiftRoute.targetPort | default "http" }} + tls: + termination: {{ .Values.openshiftRoute.tls.termination | default "edge" }} + insecureEdgeTerminationPolicy: {{ .Values.openshiftRoute.tls.insecureEdgeTerminationPolicy | default "Redirect" }} + wildcardPolicy: {{ .Values.openshiftRoute.wildcardPolicy | default "None" }} +{{- end }} \ No newline at end of file diff --git a/helm/revproxy/values.yaml b/helm/revproxy/values.yaml index 1345c680b..94e13ae3e 100644 --- a/helm/revproxy/values.yaml +++ b/helm/revproxy/values.yaml @@ -86,7 +86,6 @@ global: # -- (int) The maxSkew to use for topology spread constraints. Defaults to 1. maxSkew: 1 - # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: {} @@ -205,6 +204,27 @@ ingress: # hosts: # - chart-example.local +# -- (map) Configuration for OpenShift Route. +openshiftRoute: + # -- (bool) Whether to create an OpenShift Route + enabled: false + # -- (map) Annotations to add to the Route. + annotations: {} + # -- (string) Hostname for the Route. Leave empty to let OpenShift auto-generate + host: "" + # -- (string) Path for the Route. + path: "/" + # -- (string) Target port for the Route. + targetPort: "http" + # -- (map) TLS configuration for the Route. + tls: + # -- (string) Termination type for the Route. Valid options are "edge", "passthrough", and "reencrypt". + termination: "edge" + # -- (string) Insecure edge termination policy. Valid options are "None", "Allow", and "Redirect". + insecureEdgeTerminationPolicy: "Redirect" + # -- (string) Wildcard policy for the Route. Valid options are "None" and "Subdomain". 
+ wildcardPolicy: "None" + # -- (map) Resource requests and limits for the containers in the pod resources: # -- (map) The amount of resources that the container requests @@ -268,4 +288,4 @@ extraServices: nginx: user: nginx resolver: kube-dns.kube-system.svc.cluster.local - pidFile: /var/run/nginx.pid \ No newline at end of file + pidFile: /var/run/nginx.pid From 10f1eb6f9f6424bd5c3189d9771291a859bfc612 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 18 Dec 2025 12:59:05 -0800 Subject: [PATCH 123/126] update values and postgres info --- .gitignore | 5 ++- gearbox-default-values.yaml | 49 ++++++++++++++++-------- helm/gen3/Chart.yaml | 4 +- helm/gen3/values.yaml | 3 +- helm/revproxy/templates/ingress_dev.yaml | 2 +- 5 files changed, 40 insertions(+), 23 deletions(-) diff --git a/.gitignore b/.gitignore index 803d8c1e1..dcc5b99d0 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,7 @@ temp.yaml # Main Helm values file /values.yaml postgres.txt -pcdc_data/external/external_reference.json # External reference data file for PCDC +# External reference data file for PCDC +pcdc_data/external/external_reference.json +# Script that creates external secrets IAM user +external_secrets.bash \ No newline at end of file diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index bb64007d5..ccc4567ed 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -1,6 +1,6 @@ # Global configuration global: - dev: false + dev: true portal_app: gitops authz_entity_name: "subject" hostname: "gearbox-ped-gearbox-p1.apps.bsd-openshift-prod.bsd.uchicago.edu" @@ -12,9 +12,22 @@ global: autoscaling: enabled: true minReplicas: 2 + # -- (map) Pod security context settings + compatibility: + # -- (map) OpenShift Settings + openshift: + # -- (bool) Set to force if deploying to OpenShift + adaptSecurityContext: force arborist: - enabled: false + enabled: true + postgres: + password: "arborist_thisisaweakpassword" + imagePullSecrets: + - name: ecr-pull-secret + autoscaling: + averageCPUValue: 300m + averageMemoryValue: 256Mi # -- (map) Resource requests and limits for the containers in the pod resources: # -- (map) The amount of resources that the container requests @@ -29,26 +42,14 @@ arborist: memory: 512Mi # -- (string) The maximum amount of CPU the container can use cpu: 500m - #local Dev Only - # postgres: - # password: "arborist_thisisaweakpassword" image: - # repository: quay.io/pcdc/arborist - # tag: 2025.09 repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/arborist" tag: "latest" pullPolicy: Always + # -- (int) Number of replicas for the deployment. + replicaCount: 2 service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. 
- port: 80 targetPort: 8080 - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 fence: enabled: false @@ -508,7 +509,21 @@ gearbox-middleware: # persistence: # enabled: true # size: 1Gi - +postgresql: + enabled: true + image: + repository: bitnamilegacy/postgresql + readReplicas: + networkPolicy: + enabled: false + auth: + postgresPassword: "thisisaterriblepassword" + primary: + networkPolicy: + enabled: false + persistence: + enabled: true + size: 1Gi ######################################################################################## # DISABLED SERVICES # ######################################################################################## diff --git a/helm/gen3/Chart.yaml b/helm/gen3/Chart.yaml index a1d3e1105..63ab91b6c 100644 --- a/helm/gen3/Chart.yaml +++ b/helm/gen3/Chart.yaml @@ -162,9 +162,9 @@ dependencies: repository: "https://helm.elastic.co" condition: elasticsearch.enabled - name: postgresql - version: 11.9.13 + version: 14.3.3 repository: "https://charts.bitnami.com/bitnami" - condition: global.dev + condition: postgresql.enabled # (optional) NeuVector Kubernetes Security Policy templates to protect Gen3 # NeuVector must be installed separately. diff --git a/helm/gen3/values.yaml b/helm/gen3/values.yaml index c9228dad0..7dc169713 100644 --- a/helm/gen3/values.yaml +++ b/helm/gen3/values.yaml @@ -244,7 +244,6 @@ gen3-workflow: # -- (bool) Whether to deploy the gen3-workflow subchart. enabled: false - # -- (map) Configurations for guppy chart. guppy: # -- (bool) Whether to deploy the guppy subchart. @@ -402,6 +401,7 @@ access-backend: # -- (map) To configure postgresql subchart # Disable persistence by default so we can spin up and down ephemeral environments postgresql: + enabled: true image: repository: bitnamilegacy/postgresql primary: @@ -464,7 +464,6 @@ amanuensis: # -- (bool) Whether to deploy the amanuensis subchart. enabled: true - # -- (map) Secret information for External Secrets and DB Secrets. secrets: # -- (str) AWS access key ID. Overrides global key. 
diff --git a/helm/revproxy/templates/ingress_dev.yaml b/helm/revproxy/templates/ingress_dev.yaml index df2ea60c8..6ecaa8d67 100644 --- a/helm/revproxy/templates/ingress_dev.yaml +++ b/helm/revproxy/templates/ingress_dev.yaml @@ -1,4 +1,4 @@ -{{- if and (eq .Values.global.dev true) (eq .Values.global.aws.enabled false) }} +{{- if and (eq .Values.global.dev true) (eq .Values.global.aws.enabled false) (eq .Values.openshiftRoute.enabled false) }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: From a83e275285a050aa68dd85ca4e880a31c50757cf Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Thu, 18 Dec 2025 16:01:21 -0800 Subject: [PATCH 124/126] make changes to fence --- gearbox-default-values.yaml | 23 +++++++++++-------- .../fence-delete-expired-clients-cron.yaml | 4 ++++ helm/fence/templates/fence-deployment.yaml | 4 ++++ helm/fence/templates/jwt-keys.yaml | 12 ++++------ helm/fence/templates/presigned-url-fence.yaml | 4 ++++ helm/fence/templates/useryaml-job.yaml | 6 +++++ .../templates/openshift-override-files.yaml | 20 +++++----------- helm/gen3/templates/postgres-es-pdb.yaml | 4 ++++ 8 files changed, 47 insertions(+), 30 deletions(-) rename helm/{fence => gen3}/templates/openshift-override-files.yaml (63%) diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index ccc4567ed..4ee80211a 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -7,7 +7,7 @@ global: # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: true # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. - minAvialable: 2 + minAvailable: 2 # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: enabled: true @@ -52,7 +52,15 @@ arborist: targetPort: 8080 fence: - enabled: false + enabled: true + replicaCount: 2 + autoscaling: + averageCPUValue: 300m + averageMemoryValue: 256Mi + imagePullSecrets: + - name: ecr-pull-secret + postgres: + password: "fence_thisisaweakpassword" volumes: - name: old-config-volume secret: @@ -150,6 +158,10 @@ fence: readOnly: true mountPath: "/fence/keys/key/jwt_private_key.pem" subPath: "jwt_private_key.pem" + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/keys/key/jwt_public_key.pem" + subPath: "jwt_public_key.pem" - name: "config-volume-public" readOnly: true mountPath: "/var/www/fence/fence-config-public.yaml" @@ -192,10 +204,6 @@ fence: subPath: fence_google_storage_creds_secret.json service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. 
- port: 80 targetPort: 8080 podSecurityContext: @@ -216,9 +224,6 @@ fence: # -- (string) The maximum amount of CPU the container can use cpu: 500m image: - # repository: quay.io/pcdc/fence - # tag: "3.6.0" - # pullPolicy: IfNotPresent repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/fence" tag: "latest" pullPolicy: Always diff --git a/helm/fence/templates/fence-delete-expired-clients-cron.yaml b/helm/fence/templates/fence-delete-expired-clients-cron.yaml index dc7aaef17..bdb2b45c9 100644 --- a/helm/fence/templates/fence-delete-expired-clients-cron.yaml +++ b/helm/fence/templates/fence-delete-expired-clients-cron.yaml @@ -22,6 +22,10 @@ spec: {{- toYaml .Values.podSecurityContext | nindent 12 }} volumes: {{- toYaml .Values.volumes | nindent 10 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" diff --git a/helm/fence/templates/fence-deployment.yaml b/helm/fence/templates/fence-deployment.yaml index 013a06602..2dbea1c4c 100644 --- a/helm/fence/templates/fence-deployment.yaml +++ b/helm/fence/templates/fence-deployment.yaml @@ -46,6 +46,10 @@ spec: {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: {{- toYaml .Values.volumes | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" diff --git a/helm/fence/templates/jwt-keys.yaml b/helm/fence/templates/jwt-keys.yaml index 06d10f288..3724b85a7 100644 --- a/helm/fence/templates/jwt-keys.yaml +++ b/helm/fence/templates/jwt-keys.yaml @@ -1,9 +1,7 @@ {{- if or (not .Values.global.externalSecrets.deploy) (and .Values.global.externalSecrets.deploy .Values.externalSecrets.createK8sJwtKeysSecret) }} -apiVersion: v1 -kind: Secret -metadata: - name: fence-jwt-keys -type: Opaque -data: - jwt_private_key.pem: {{ include "getOrCreatePrivateKey" . }} +{{include "common.jwt-key-pair-secret" .}} +--- +{{include "common.jwt_public_key_setup_sa" .}} +--- +{{include "common.create_public_key_job" .}} {{- end }} \ No newline at end of file diff --git a/helm/fence/templates/presigned-url-fence.yaml b/helm/fence/templates/presigned-url-fence.yaml index 5d6c4517e..cef6b3139 100644 --- a/helm/fence/templates/presigned-url-fence.yaml +++ b/helm/fence/templates/presigned-url-fence.yaml @@ -42,6 +42,10 @@ spec: {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: {{- toYaml .Values.volumes | nindent 8 }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: presigned-url-fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" diff --git a/helm/fence/templates/useryaml-job.yaml b/helm/fence/templates/useryaml-job.yaml index e594ddbb3..be11da813 100644 --- a/helm/fence/templates/useryaml-job.yaml +++ b/helm/fence/templates/useryaml-job.yaml @@ -25,10 +25,16 @@ spec: - name: useryaml configMap: name: useryaml + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} containers: - name: fence image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: + {{- toYaml .Values.resources | nindent 12 }} env: {{- toYaml .Values.env | nindent 10 }} volumeMounts: diff --git a/helm/fence/templates/openshift-override-files.yaml b/helm/gen3/templates/openshift-override-files.yaml similarity index 63% rename from helm/fence/templates/openshift-override-files.yaml rename to helm/gen3/templates/openshift-override-files.yaml index be87f67b5..796528738 100644 --- a/helm/fence/templates/openshift-override-files.yaml +++ b/helm/gen3/templates/openshift-override-files.yaml @@ -19,26 +19,18 @@ data: ) fi - if [ "${OPENSHIFT}" = "true" ]; then - /usr/bin/nginx - else - nginx - fi + /usr/bin/nginx + poetry run gunicorn -c "/fence/deployment/wsgi/gunicorn.conf.py" gunicorn.conf.py: |- + from os import getuid, getgid wsgi_app = "deployment.wsgi.wsgi:application" bind = "0.0.0.0:8000" workers = 1 preload_app = True - import os - # Set user/group to the current user's UID and GID, unless UID is 0 (root), then set to 'gen3' - if os.getuid() == 0: - user = "gen3" - group = "gen3" - else: - user = os.getuid() - group = os.getgid() + user = getuid() + group = getgid() timeout = 300 keepalive = 2 - keepalive_timeout = 5 \ No newline at end of file + keepalive_timeout = 5 diff --git a/helm/gen3/templates/postgres-es-pdb.yaml b/helm/gen3/templates/postgres-es-pdb.yaml index 528f51b08..b17928319 100644 --- a/helm/gen3/templates/postgres-es-pdb.yaml +++ b/helm/gen3/templates/postgres-es-pdb.yaml @@ -1,4 +1,5 @@ {{- if and .Values.global.pdb (.Values.global.dev) }} +{{ if .Values.postgresql.enabled }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: @@ -8,7 +9,9 @@ spec: selector: matchLabels: app.kubernetes.io/name: "postgresql" +{{- end }} --- +{{ if .Values.elasticsearch.enabled }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: @@ -19,3 +22,4 @@ spec: matchLabels: app: "gen3-elasticsearch-master" {{- end }} +{{- end }} From 5f62ba9ef6bc275b10866e850e0e6c2550efbfd9 Mon Sep 17 00:00:00 2001 From: paulmurdoch19 Date: Fri, 19 Dec 2025 10:24:27 -0800 Subject: [PATCH 125/126] final updates --- gearbox-default-values.yaml | 224 ++++++++++++++---- .../arbrorsit-expired-access-cronjob.yaml | 10 +- .../fence-delete-expired-clients-cron.yaml | 39 +-- helm/fence/templates/fence-deployment.yaml | 10 - .../templates/deployment.yaml | 4 + helm/gearbox/templates/deployment.yaml | 4 + helm/gearbox/templates/external-secret.yaml | 1 - .../templates/openshift-override-files.yaml | 36 --- helm/portal/templates/deployment.yaml | 12 +- 9 files changed, 220 insertions(+), 120 deletions(-) delete mode 100644 helm/gen3/templates/openshift-override-files.yaml diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml index 4ee80211a..acd6d0fe5 100644 --- a/gearbox-default-values.yaml +++ b/gearbox-default-values.yaml @@ -7,7 +7,7 @@ global: # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. pdb: true # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. 
- minAvailable: 2 + minAvailable: 1 # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: enabled: true @@ -108,10 +108,6 @@ fence: items: - key: nginx.conf path: nginx.conf - - name: openshift-fence-override-files - configMap: - name: openshift-fence-override-files - defaultMode: 0755 volumeMounts: - name: "old-config-volume" @@ -170,14 +166,6 @@ fence: mountPath: /etc/nginx/nginx.conf subPath: nginx.conf readOnly: true - - name: openshift-fence-override-files - mountPath: /fence/dockerrun.bash - subPath: dockerrun.bash - readOnly: true - - name: openshift-fence-override-files - mountPath: /fence/deployment/wsgi/gunicorn.conf.py - subPath: gunicorn.conf.py - readOnly: true # -- (list) Volumes to mount to the init container. @@ -202,7 +190,68 @@ fence: readOnly: true mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" subPath: fence_google_storage_creds_secret.json - + env: + - name: GEN3_UWSGI_TIMEOUT + valueFrom: + configMapKeyRef: + name: manifest-global + key: uwsgi-timeout + optional: true + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: AWS_STS_REGIONAL_ENDPOINTS + value: regional + - name: PYTHONPATH + value: /var/www/fence + - name: GEN3_DEBUG + value: "False" + - name: PGHOST + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: host + optional: false + - name: PGUSER + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: username + optional: false + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: password + optional: false + - name: PGDB + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: database + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: fence-dbcreds + key: dbcreated + optional: false + - name: DB + value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) + - name: INDEXD_PASSWORD + valueFrom: + secretKeyRef: + name: indexd-service-creds + key: fence + optional: true + - name: gen3Env + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + - name: OPENSHIFT + value: "true" service: targetPort: 8080 @@ -303,7 +352,7 @@ fence: - services.gearbox-data-admin portal: - enabled: false + enabled: true image: repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-fe" tag: "latest" @@ -312,8 +361,9 @@ portal: resources: # -- (map) The amount of resources that the container requests requests: - cpu: 1.0 - memory: 1024Mi + memory: 12Mi + # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) + cpu: 100m limits: cpu: 2.0 memory: 4096Mi @@ -329,6 +379,8 @@ portal: { "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" } + imagePullSecrets: + - name: ecr-pull-secret podSecurityContext: runAsNonRoot: true runAsUser: 1000970000 @@ -378,25 +430,24 @@ revproxy: openshift.io/host.generated: "true" gearbox: - enabled: false + enabled: true + replicaCount: 2 + postgres: + password: "gearbox_thisisaweakpassword" + autoscaling: + averageCPUValue: 300m + averageMemoryValue: 256Mi + imagePullSecrets: + - name: ecr-pull-secret + podSecurityContext: + supplementalGroups: + - 1000 image: - # repository: quay.io/pcdc/gearbox_be - # tag: "pcdc_dev_2025-08-26T14_03_39-05_00" - # pullPolicy: Always repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-matching" tag: "latest" pullPolicy: Always service: - # -- (string) Type of service. 
Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 targetPort: 8080 - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 volumes: - name: config-volume secret: @@ -440,20 +491,63 @@ gearbox: memory: 512Mi # -- (string) The maximum amount of CPU the container can use cpu: 500m + env: + - name: GEN3_DEBUG + value: "False" + - name: DB_DATABASE + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: database + optional: false + - name: DB_HOST + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: host + optional: false + - name: DB_USER + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: username + optional: false + - name: ADMIN_LOGINS + valueFrom: + secretKeyRef: + name: gearbox-g3auto + key: base64Authz.txt + optional: true + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: password + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: dbcreated + optional: false + - name: OPENSHIFT + value: "true" gearbox-middleware: - enabled: false + enabled: true + replicaCount: 2 + autoscaling: + averageCPUValue: 300m + averageMemoryValue: 256Mi + imagePullSecrets: + - name: ecr-pull-secret + podSecurityContext: + supplementalGroups: + - 1000 gearboxMiddlewareG3auto: testing: False service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 targetPort: 8080 image: - # repository: quay.io/pcdc/gearbox-middleware - # tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" - # pullPolicy: Always repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-middleware" tag: "latest" pullPolicy: Always @@ -488,6 +582,41 @@ gearbox-middleware: mountPath: /etc/nginx/nginx.conf subPath: nginx.conf readOnly: true + env: + - name: GEN3_DEBUG + value: "False" + - name: DB_DATABASE + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: database + optional: false + - name: DB_HOST + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: host + optional: false + - name: DB_USER + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: username + optional: false + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: password + optional: false + - name: DBREADY + valueFrom: + secretKeyRef: + name: gearbox-dbcreds + key: dbcreated + optional: false + - name: OPENSHIFT + value: "true" resources: # -- (map) The amount of resources that the container requests requests: @@ -501,19 +630,7 @@ gearbox-middleware: memory: 512Mi # -- (string) The maximum amount of CPU the container can use cpu: 500m - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 - #local Dev Only - # auth: - # postgresPassword: "thisisaterriblepassword" - # primary: - # persistence: - # enabled: true - # size: 1Gi postgresql: enabled: true image: @@ -524,6 +641,13 @@ postgresql: auth: postgresPassword: "thisisaterriblepassword" primary: + resources: + requests: + cpu: 250m # Change this value to adjust CPU requests, e.g., "500m" for 0.5 CPU + memory: 512Mi # Add or change this line to set memory requests + limits: + cpu: 1 # Add or change this line to set CPU limits, e.g., "1" for 1 CPU + memory: 1Gi # Add or change this line to set memory limits 
  networkPolicy:
    enabled: false
  persistence:
diff --git a/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml b/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml
index 41c057fef..51c3ca689 100644
--- a/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml
+++ b/helm/arborist/templates/arbrorsit-expired-access-cronjob.yaml
@@ -33,12 +33,18 @@ spec:
                     values:
                     - ONDEMAND
          automountServiceAccountToken: false
+          {{- with .Values.imagePullSecrets }}
+          imagePullSecrets:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
          containers:
            - name: arborist
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: Always
              env:
+                {{- if .Values.env }}
                {{- toYaml .Values.env | nindent 12 }}
+                {{- end }}
                - name: PGPASSWORD
                  valueFrom:
                    secretKeyRef:
@@ -77,8 +83,10 @@ spec:
                      name: arborist-dbcreds
                      key: dbcreated
                      optional: false
+              {{- if .Values.resources }}
              resources:
-                {{- toYaml .Values.resources | nindent 12 }}
+                {{- toYaml .Values.resources | nindent 14 }}
+              {{- end }}
              command: ["sh"]
              args:
                - "-c"
diff --git a/helm/fence/templates/fence-delete-expired-clients-cron.yaml b/helm/fence/templates/fence-delete-expired-clients-cron.yaml
index bdb2b45c9..cb2fcdcbb 100644
--- a/helm/fence/templates/fence-delete-expired-clients-cron.yaml
+++ b/helm/fence/templates/fence-delete-expired-clients-cron.yaml
@@ -19,33 +19,38 @@ spec:
            app: gen3job
        spec:
          securityContext:
-          {{- toYaml .Values.podSecurityContext | nindent 12 }}
+            {{- toYaml .Values.podSecurityContext | nindent 12 }}
          volumes:
-          {{- toYaml .Values.volumes | nindent 10 }}
+            {{- toYaml .Values.volumes | nindent 12 }}
          {{- with .Values.imagePullSecrets }}
          imagePullSecrets:
-          {{- toYaml . | nindent 8 }}
+            {{- toYaml . | nindent 12 }}
          {{- end }}
          containers:
            - name: fence
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: Always
              env:
-                - name: FENCE_PUBLIC_CONFIG
-                  valueFrom:
-                    configMapKeyRef:
-                      name: manifest-fence
-                      key: fence-config-public.yaml
-                      optional: true
-                - name: slackWebHook
-                  valueFrom:
-                    secretKeyRef:
-                      name: slack-webhook
-                      key: slack_webhook
-                      optional: true
-                {{- toYaml .Values.env | nindent 16 }}
+                {{- if .Values.env }}
+                {{- toYaml .Values.env | nindent 14 }}
+                {{- end }}
+                - name: FENCE_PUBLIC_CONFIG
+                  valueFrom:
+                    configMapKeyRef:
+                      name: manifest-fence
+                      key: fence-config-public.yaml
+                      optional: true
+                - name: slackWebHook
+                  valueFrom:
+                    secretKeyRef:
+                      name: slack-webhook
+                      key: slack_webhook
+                      optional: true
+
              volumeMounts:
-                {{- toYaml .Values.initVolumeMounts | nindent 12 }}
+                {{- toYaml .Values.initVolumeMounts | nindent 14 }}
+              resources:
+                {{- toYaml .Values.resources | nindent 14 }}
              command: ["/bin/bash"]
              args:
                - "-c"
diff --git a/helm/fence/templates/fence-deployment.yaml b/helm/fence/templates/fence-deployment.yaml
index 2dbea1c4c..6f2b4ae29 100644
--- a/helm/fence/templates/fence-deployment.yaml
+++ b/helm/fence/templates/fence-deployment.yaml
@@ -94,16 +94,6 @@ spec:
        - name: fence-init
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          ports:
-            - name: http
-              containerPort: {{ .Values.service.targetPort }}
-              protocol: TCP
-            - name: https
-              containerPort: 443
-              protocol: TCP
-            - name: container
-              containerPort: 6567
-              protocol: TCP
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          command: ["/bin/bash"]
diff --git a/helm/gearbox-middleware/templates/deployment.yaml b/helm/gearbox-middleware/templates/deployment.yaml
index 387fa8e8a..a7e806b89 100644
--- a/helm/gearbox-middleware/templates/deployment.yaml
+++ b/helm/gearbox-middleware/templates/deployment.yaml
@@ -49,6 +49,10 @@ spec:
      automountServiceAccountToken: {{ .Values.automountServiceAccountToken}}
      volumes:
        {{- toYaml .Values.volumes | nindent 8 }}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds}}
      containers:
        - name: gearbox-middleware
diff --git a/helm/gearbox/templates/deployment.yaml b/helm/gearbox/templates/deployment.yaml
index 29b325809..144ee04ec 100644
--- a/helm/gearbox/templates/deployment.yaml
+++ b/helm/gearbox/templates/deployment.yaml
@@ -42,6 +42,10 @@ spec:
      serviceAccountName: {{ include "gearbox.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
diff --git a/helm/gearbox/templates/external-secret.yaml b/helm/gearbox/templates/external-secret.yaml
index 4ce4d9235..cb73179bd 100644
--- a/helm/gearbox/templates/external-secret.yaml
+++ b/helm/gearbox/templates/external-secret.yaml
@@ -19,5 +19,4 @@ spec:
 {{- end }}
 {{- if and .Values.global.externalSecrets.deploy (not .Values.global.externalSecrets.createLocalK8sSecret) }}
 {{ include "common.externalSecret.db" . }}
-{{- end}}
 {{- end}}
\ No newline at end of file
diff --git a/helm/gen3/templates/openshift-override-files.yaml b/helm/gen3/templates/openshift-override-files.yaml
deleted file mode 100644
index 796528738..000000000
--- a/helm/gen3/templates/openshift-override-files.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: openshift-fence-override-files
-data:
-  dockerrun.bash: |-
-    #!/bin/bash
-    #
-    # Kubernetes may mount jwt-keys as a tar ball
-    #
-    if [ -f /fence/jwt-keys.tar ]; then
-      (
-        cd /fence
-        tar xvf jwt-keys.tar
-        if [ -d jwt-keys ]; then
-          mkdir -p keys
-          mv jwt-keys/* keys/
-        fi
-      )
-    fi
-
-    /usr/bin/nginx
-
-    poetry run gunicorn -c "/fence/deployment/wsgi/gunicorn.conf.py"
-
-  gunicorn.conf.py: |-
-    from os import getuid, getgid
-    wsgi_app = "deployment.wsgi.wsgi:application"
-    bind = "0.0.0.0:8000"
-    workers = 1
-    preload_app = True
-    user = getuid()
-    group = getgid()
-    timeout = 300
-    keepalive = 2
-    keepalive_timeout = 5
diff --git a/helm/portal/templates/deployment.yaml b/helm/portal/templates/deployment.yaml
index ad9161579..55d15babe 100644
--- a/helm/portal/templates/deployment.yaml
+++ b/helm/portal/templates/deployment.yaml
@@ -78,6 +78,10 @@ spec:
            optional: true
        - name: extra-images
          emptyDir: {}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
      initContainers:
        {{- if .Values.extraImages }}
        - name: init
@@ -112,7 +116,6 @@ spec:
            - /bin/sh
            - -c
            - |
-              sleep infinity
              mkdir -p /tmp/repo
              cd /tmp/repo
              # Clone only the specific branch with minimal history and sparse checkout
@@ -167,10 +170,9 @@ spec:
            - containerPort: {{ .Values.service.targetPort }}
              name: http
            - containerPort: 443
-          command:
-            - /bin/sh
-            - -c
-            - sleep infinity
+          # command:
+          #   - /bin/bash
+          #   - ./dockerStart.sh
          env:
            - name: HOSTNAME
              value: revproxy-service

From d26c32093d1b84afd88ef9e03af65b483c6814ac Mon Sep 17 00:00:00 2001
From: paulmurdoch19
Date: Fri, 19 Dec 2025 10:35:05 -0800
Subject: [PATCH 126/126] prep pr

---
 .github/ct.yaml                           |    4 +-
 .github/workflows/release.yaml            |    6 +-
 .gitignore                                |    5 +-
 .secrets.baseline                         |    6 +-
 dev-gearbox-default-values-openshift.yaml |  731 -----------
 gearbox-default-values.yaml               |  722 -----------
 openshift.code-workspace                  |   11 -
 pcdc-default-values.yaml                  | 1369 ---------------------
 8 files changed, 12 insertions(+), 2842 deletions(-)
 delete mode 100644 dev-gearbox-default-values-openshift.yaml
 delete mode 100644 gearbox-default-values.yaml
 delete mode 100644 openshift.code-workspace
 delete mode 100644 pcdc-default-values.yaml

diff --git a/.github/ct.yaml b/.github/ct.yaml
index a25e27043..f5c0f5fdd 100644
--- a/.github/ct.yaml
+++ b/.github/ct.yaml
@@ -1,5 +1,5 @@
 remote: origin
-target-branch: openshift
+target-branch: master
 chart-dirs:
   - helm
 chart-repos:
@@ -11,4 +11,4 @@ helm-extra-args: --timeout 600s
 check-version-increment: true
 debug: false
 validate-maintainers: false
-helm-dependency-extra-args: "--skip-refresh"
+helm-dependency-extra-args: "--skip-refresh"
\ No newline at end of file
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 8f0c7afce..f4391ce5e 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -3,7 +3,7 @@ name: Release Charts
 on:
   push:
     branches:
-      - openshift
+      - master

 jobs:
   release:
@@ -20,12 +20,12 @@ jobs:
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
-
+
      - name: Install Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.10.0
-
+
      - name: Add helm repositories
        run: |
          helm repo add bitnami https://charts.bitnami.com/bitnami
diff --git a/.gitignore b/.gitignore
index dcc5b99d0..528c85cea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,4 +23,7 @@ postgres.txt
 # External reference data file for PCDC
 pcdc_data/external/external_reference.json
 # Script that creates external secrets IAM user
-external_secrets.bash
\ No newline at end of file
+external_secrets.bash
+secret-gearbox-default-values.yaml
+secret-openshift-gearbox-default-values.yaml
+secret-pcdc-default-values.yaml
\ No newline at end of file
diff --git a/.secrets.baseline b/.secrets.baseline
index 150edb012..299ed00c1 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -153,14 +153,14 @@
         "filename": "helm/portal/values.yaml",
         "hashed_secret": "08eeb737b239bdb7362a875b90e22c10b8826b20",
         "is_verified": false,
-        "line_number": 507
+        "line_number": 518
       },
       {
         "type": "Base64 High Entropy String",
         "filename": "helm/portal/values.yaml",
         "hashed_secret": "eb9739c6625f06b4ab73035223366dda6262ae77",
         "is_verified": false,
-        "line_number": 509
+        "line_number": 520
       }
     ],
     "helm/revproxy/nginx/helpers.js": [
@@ -173,5 +173,5 @@
       }
     ]
   },
-  "generated_at": "2025-09-24T16:09:34Z"
+  "generated_at": "2025-12-04T22:48:57Z"
 }
diff --git a/dev-gearbox-default-values-openshift.yaml b/dev-gearbox-default-values-openshift.yaml
deleted file mode 100644 index 9d1aef98d..000000000 --- a/dev-gearbox-default-values-openshift.yaml +++ /dev/null @@ -1,731 +0,0 @@ -# Global configuration -global: - portal_app: gitops - authz_entity_name: "subject" - - # -- (map) Pod security context settings - compatibility: - # -- (map) OpenShift Settings - openshift: - # -- (bool) Set to force if deploying to OpenShift - adaptSecurityContext: force - - -arborist: - enabled: false - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - #local Dev Only - # postgres: - # password: "arborist_thisisaweakpassword" - image: - # repository: quay.io/pcdc/arborist - # tag: 2025.09 - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/arborist" - tag: "latest" - pullPolicy: Always - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 - targetPort: 8080 - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 - -fence: - #local Dev Only - # postgres: - # password: "fence_thisisaweakpassword" - FENCE_CONFIG: - DEBUG: true - MOCK_STORAGE: true - #fill in - AMANUENSIS_PUBLIC_KEY_PATH: "/fence/keys/key/jwt_public_key.pem" - MOCK_GOOGLE_AUTH: true - mock_default_user: "test@example.com" - ENABLE_VISA_UPDATE_CRON: false - ENABLE_DELETE_EXPIRED_CLIENTS: false - ENABLE_FENCE_DEPLOYMENT: true - ENABLE_PRESIGNED_URL_FENCE: false - USER_YAML_ENABLED: false - volumes: - - name: old-config-volume - secret: - secretName: "fence-secret" - - name: json-secret-volume - secret: - secretName: "fence-json-secret" - optional: true - - name: creds-volume - secret: - secretName: "fence-creds" - - name: config-helper - configMap: - name: config-helper - optional: true - - name: logo-volume - configMap: - name: "logo-config" - - name: config-volume - secret: - secretName: "fence-config" - - name: fence-google-app-creds-secret-volume - secret: - secretName: "fence-google-app-creds-secret" - - name: fence-google-storage-creds-secret-volume - secret: - secretName: "fence-google-storage-creds-secret" - - name: fence-jwt-keys - secret: - secretName: "fence-jwt-keys" - - name: privacy-policy - configMap: - name: "privacy-policy" - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - optional: true - - name: config-volume-public - configMap: - name: "manifest-fence" - optional: true - - name: nginx-config - configMap: - name: nginx-config - items: - - key: nginx.conf - path: nginx.conf - - volumeMounts: - - name: "old-config-volume" - readOnly: true - mountPath: "/var/www/fence/local_settings.py" - subPath: local_settings.py - - name: "json-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_credentials.json" - subPath: fence_credentials.json - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/fence/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/fence/config_helper.py" - 
subPath: config_helper.py - - name: "logo-volume" - readOnly: true - mountPath: "/fence/fence/static/img/logo.svg" - subPath: "logo.svg" - - name: "privacy-policy" - readOnly: true - mountPath: "/fence/fence/static/privacy_policy.md" - subPath: "privacy_policy.md" - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config.yaml" - subPath: fence-config.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-google-app-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_app_creds_secret.json" - subPath: fence_google_app_creds_secret.json - - name: "fence-google-storage-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" - subPath: fence_google_storage_creds_secret.json - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_private_key.pem" - subPath: "jwt_private_key.pem" - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_public_key.pem" - subPath: "jwt_public_key.pem" - - name: "config-volume-public" - readOnly: true - mountPath: "/var/www/fence/fence-config-public.yaml" - subPath: fence-config-public.yaml - - name: nginx-config - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - readOnly: true - - # -- (list) Volumes to mount to the init container. - - initVolumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config.yaml" - subPath: fence-config.yaml - - name: "config-volume-public" - readOnly: true - mountPath: "/var/www/fence/fence-config-public.yaml" - subPath: fence-config-public.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-google-app-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_app_creds_secret.json" - subPath: fence_google_app_creds_secret.json - - name: "fence-google-storage-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" - subPath: fence_google_storage_creds_secret.json - - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. 
- port: 80 - targetPort: 8080 - podSecurityContext: - runAsNonRoot: true - # runAsUser: 1000970000 - # runAsGroup: 1000970000 - # fsGroup: 1000970000 - supplementalGroups: - - 1000 - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - image: - # repository: quay.io/pcdc/fence - # tag: "3.6.0" - # pullPolicy: IfNotPresent - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/fence" - tag: "latest" - pullPolicy: Always - USER_YAML: | - authz: - - resources: - - name: 'gearbox_gateway' - - name: 'data_file' - description: 'data files, stored in S3' - - name: 'sower' - description: 'sower resource' - - name: workspace - description: jupyter notebooks - - name: analysis - description: analysis tool service - - name: portal - description: data portal service - - name: privacy - description: User privacy policy - - name: 'services' - subresources: - - name: 'sheepdog' - subresources: - - name: 'submission' - subresources: - - name: 'program' - - name: 'project' - - name: 'amanuensis' - - name: 'fence' - subresources: - - name: 'admin' - - name: programs - subresources: - - name: pcdc - - policies: - - id: gearbox_admin - resource_paths: ['/gearbox_gateway'] - role_ids: ['gearbox_user'] - - id: 'data_upload' - description: 'upload raw data files to S3' - resource_paths: - - /data_file - role_ids: - - file_uploader - - id: 'services.amanuensis-admin' - description: 'admin access to amanuensis' - role_ids: - - 'amanuensis_admin' - resource_paths: - - '/services/amanuensis' - - id: 'services.fence-admin' - description: 'admin access to fence' - role_ids: - - 'fence_admin' - resource_paths: - - '/services/fence/admin' - - id: workspace - description: be able to use workspace - resource_paths: - - /workspace - role_ids: - - workspace_user - - id: analysis - description: be able to use analysis tool service - resource_paths: - - /analysis - role_ids: - - analysis_user - - id: privacy_policy - description: User agreed on the privacy policy - resource_paths: - - /privacy - role_ids: - - reader - - id: indexd_admin - description: full access to indexd API - role_ids: - - indexd_admin - resource_paths: - - /programs - - description: be able to use sower job - id: sower - resource_paths: [/sower] - role_ids: [sower_user] - - id: 'services.sheepdog-admin' - description: 'CRUD access to programs and projects' - role_ids: - - 'sheepdog_admin' - resource_paths: - - '/services/sheepdog/submission/program' - - '/services/sheepdog/submission/project' - - id: all_programs_reader - role_ids: - - reader - - storage_reader - resource_paths: - - /programs - - id: login_no_access - role_ids: - - reader - resource_paths: - - /portal - - id: 'data_admin' - description: 'policy test, should write a policy per resource and assign to user in order to avoid duplicating policies' - role_ids: - - admin - resource_paths: - - /programs - - /programs/pcdc - - - roles: - - id: 'gearbox_user' - permissions: - - id: 'gearbox_access' - action: - service: '*' - method: '*' - - id: 'file_uploader' - description: 'can upload 
data files' - permissions: - - id: 'file_upload' - action: - service: 'fence' - method: 'file_upload' - - id: 'amanuensis_admin' - description: 'can do admin work on project/data request' - permissions: - - id: 'amanuensis_admin_action' - action: - service: 'amanuensis' - method: '*' - - id: 'fence_admin' - description: 'can use the admin endpoint in Fence' - permissions: - - id: 'fence_admin_permission' - action: - service: 'fence' - method: '*' - - id: workspace_user - permissions: - - action: {method: access, service: jupyterhub} - id: workspace_access - - id: sower_user - permissions: - - action: {method: access, service: job} - id: sower_access - - id: analysis_user - permissions: - - action: {method: access, service: analysis} - id: analysis_access - # Sheepdog admin role - - id: 'sheepdog_admin' - description: 'sheepdog admin role for program project crud' - permissions: - - id: 'sheepdog_admin_action' - action: - service: 'sheepdog' - method: '*' - - id: indexd_admin - description: full access to indexd API - permissions: - - id: indexd_admin - action: - service: indexd - method: '*' - - id: admin - permissions: - - id: admin - action: - service: '*' - method: '*' - - id: creator - permissions: - - id: creator - action: - service: '*' - method: create - - id: reader - permissions: - - id: reader - action: - service: '*' - method: read - - id: updater - permissions: - - id: updater - action: - service: '*' - method: update - - id: deleter - permissions: - - id: deleter - action: - service: '*' - method: delete - - id: storage_writer - permissions: - - id: storage_creator - action: - service: '*' - method: write-storage - - id: storage_reader - permissions: - - id: storage_reader - action: - service: '*' - method: read-storage - - users: - ### BEGIN INTERNS SECTION ### - ### END INTERNS SECTION ### - pmurdoch@uchicago.edu: - admin: true - policies: - - gearbox_admin - - data_upload - - workspace - - services.sheepdog-admin - - services.amanuensis-admin - - data_admin - - analysis - - privacy_policy - - login_no_access - - sower - -portal: - enabled: false - image: - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-fe" - tag: "latest" - pullPolicy: Always - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - cpu: 1.0 - memory: 1024Mi - limits: - cpu: 2.0 - memory: 4096Mi - # -- (map) The maximum amount of resources that the container is allowed to use - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 - targetPort: 8080 - gitops: - json: | - { - "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" - } - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 - gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" - -revproxy: - enabled: true - nginx: - user: "nginx" - resolver: "dns-default.openshift-dns.svc.cluster.local" - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. 
- port: 80 - targetPort: 8080 - image: - repository: quay.io/cdis/nginx - tag: 2025.09 - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (string) The amount of memory requested - memory: 12Mi - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - -gearbox: - enabled: false - image: - # repository: quay.io/pcdc/gearbox_be - # tag: "pcdc_dev_2025-08-26T14_03_39-05_00" - # pullPolicy: Always - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-matching" - tag: "latest" - pullPolicy: Always - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 - targetPort: 8080 - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 - volumes: - - name: config-volume - secret: - secretName: "gearbox-g3auto" - - name: gearbox-middleware-jwt-keys - secret: - secretName: "gearbox-middleware-jwt-keys" - items: - - key: jwt_public_key.pem - path: jwt_public_key.pem - optional: false - - name: nginx-config - configMap: - name: nginx-config - items: - - key: nginx.conf - path: nginx.conf - volumeMounts: - - name: "gearbox-middleware-jwt-keys" - readOnly: true - mountPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" - subPath: jwt_public_key.pem - - name: config-volume - readOnly: true - mountPath: /gearbox/.env - subPath: gearbox.env - - name: nginx-config - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - readOnly: true - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m -gearbox-middleware: - enabled: false - gearboxMiddlewareG3auto: - testing: False - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 - targetPort: 8080 - image: - # repository: quay.io/pcdc/gearbox-middleware - # tag: "pcdc_dev_Fri__15_Aug_2025_17_57_50_GMT" - # pullPolicy: Always - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-middleware" - tag: "latest" - pullPolicy: Always - # -- (list) Volumes to attach to the container. 
- volumes: - - name: config-volume - secret: - secretName: "gearbox-middleware-g3auto" - - name: gearbox-middleware-jwt-keys - secret: - secretName: "gearbox-middleware-jwt-keys" - items: - - key: jwt_private_key.pem - path: jwt_private_key.pem - optional: false - - name: nginx-config - configMap: - name: nginx-config - items: - - key: nginx.conf - path: nginx.conf - volumeMounts: - - name: "gearbox-middleware-jwt-keys" - readOnly: true - mountPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" - subPath: jwt_private_key.pem - - name: config-volume - readOnly: true - mountPath: /gearbox-middleware/.env - subPath: gearbox-middleware.env - - name: nginx-config - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - readOnly: true - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 - -postgresql: - image: - repository: bitnamilegacy/postgresql - primary: - networkPolicy: - enabled: false - readReplicas: - networkPolicy: - enabled: false - #local Dev Only - # auth: - # postgresPassword: "thisisaterriblepassword" - # primary: - # persistence: - # enabled: true - # size: 1Gi -######################################################################################## -# DISABLED SERVICES # -######################################################################################## -elasticsearch: - enabled: false - -amanuensis: - enabled: false - -guppy: - enabled: false - -manifestservice: - enabled: false - -pcdcanalysistools: - enabled: false - -peregrine: - enabled: false - -sheepdog: - enabled: false - -sower: - enabled: false - -wts: - enabled: false - -ambassador: - # -- (bool) Whether to deploy the ambassador subchart. - enabled: false - -argo-wrapper: - # -- (bool) Whether to deploy the argo-wrapper subchart. - enabled: false - -audit: - # -- (bool) Whether to deploy the audit subchart. - enabled: false - -aws-es-proxy: - enabled: false - -metadata: - # -- (bool) Whether to deploy the metadata subchart. - enabled: false - -pidgin: - # -- (bool) Whether to deploy the pidgin subchart. - enabled: false - -indexd: - enabled: false - -hatchery: - enabled: false - -cohort-middleware: - enabled: false - -etl: - enabled: false - -gen3-network-policies: - enabled: false diff --git a/gearbox-default-values.yaml b/gearbox-default-values.yaml deleted file mode 100644 index acd6d0fe5..000000000 --- a/gearbox-default-values.yaml +++ /dev/null @@ -1,722 +0,0 @@ -# Global configuration -global: - dev: true - portal_app: gitops - authz_entity_name: "subject" - hostname: "gearbox-ped-gearbox-p1.apps.bsd-openshift-prod.bsd.uchicago.edu" - # -- (bool) If the service will be deployed with a Pod Disruption Budget. Note- you need to have more than 2 replicas for the pdb to be deployed. - pdb: true - # -- (int) The minimum amount of pods that are available at all times if the PDB is deployed. 
- minAvailable: 1 - # -- (map) This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ - autoscaling: - enabled: true - minReplicas: 2 - # -- (map) Pod security context settings - compatibility: - # -- (map) OpenShift Settings - openshift: - # -- (bool) Set to force if deploying to OpenShift - adaptSecurityContext: force - -arborist: - enabled: true - postgres: - password: "arborist_thisisaweakpassword" - imagePullSecrets: - - name: ecr-pull-secret - autoscaling: - averageCPUValue: 300m - averageMemoryValue: 256Mi - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - image: - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/arborist" - tag: "latest" - pullPolicy: Always - # -- (int) Number of replicas for the deployment. - replicaCount: 2 - service: - targetPort: 8080 - -fence: - enabled: true - replicaCount: 2 - autoscaling: - averageCPUValue: 300m - averageMemoryValue: 256Mi - imagePullSecrets: - - name: ecr-pull-secret - postgres: - password: "fence_thisisaweakpassword" - volumes: - - name: old-config-volume - secret: - secretName: "fence-secret" - - name: json-secret-volume - secret: - secretName: "fence-json-secret" - optional: true - - name: creds-volume - secret: - secretName: "fence-creds" - - name: config-helper - configMap: - name: config-helper - optional: true - - name: logo-volume - configMap: - name: "logo-config" - - name: config-volume - secret: - secretName: "fence-config" - - name: fence-google-app-creds-secret-volume - secret: - secretName: "fence-google-app-creds-secret" - - name: fence-google-storage-creds-secret-volume - secret: - secretName: "fence-google-storage-creds-secret" - - name: fence-jwt-keys - secret: - secretName: "fence-jwt-keys" - - name: privacy-policy - configMap: - name: "privacy-policy" - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - optional: true - - name: config-volume-public - configMap: - name: "manifest-fence" - optional: true - - name: nginx-config - configMap: - name: nginx-config - items: - - key: nginx.conf - path: nginx.conf - - volumeMounts: - - name: "old-config-volume" - readOnly: true - mountPath: "/var/www/fence/local_settings.py" - subPath: local_settings.py - - name: "json-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_credentials.json" - subPath: fence_credentials.json - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/fence/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/fence/config_helper.py" - subPath: config_helper.py - - name: "logo-volume" - readOnly: true - mountPath: "/fence/fence/static/img/logo.svg" - subPath: "logo.svg" - - name: "privacy-policy" - readOnly: true - mountPath: "/fence/fence/static/privacy_policy.md" - subPath: "privacy_policy.md" - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config.yaml" - subPath: fence-config.yaml - - name: "yaml-merge" - readOnly: true - mountPath: 
"/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-google-app-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_app_creds_secret.json" - subPath: fence_google_app_creds_secret.json - - name: "fence-google-storage-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" - subPath: fence_google_storage_creds_secret.json - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_private_key.pem" - subPath: "jwt_private_key.pem" - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_public_key.pem" - subPath: "jwt_public_key.pem" - - name: "config-volume-public" - readOnly: true - mountPath: "/var/www/fence/fence-config-public.yaml" - subPath: fence-config-public.yaml - - name: nginx-config - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - readOnly: true - - # -- (list) Volumes to mount to the init container. - - initVolumeMounts: - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config.yaml" - subPath: fence-config.yaml - - name: "config-volume-public" - readOnly: true - mountPath: "/var/www/fence/fence-config-public.yaml" - subPath: fence-config-public.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-google-app-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_app_creds_secret.json" - subPath: fence_google_app_creds_secret.json - - name: "fence-google-storage-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" - subPath: fence_google_storage_creds_secret.json - env: - - name: GEN3_UWSGI_TIMEOUT - valueFrom: - configMapKeyRef: - name: manifest-global - key: uwsgi-timeout - optional: true - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: AWS_STS_REGIONAL_ENDPOINTS - value: regional - - name: PYTHONPATH - value: /var/www/fence - - name: GEN3_DEBUG - value: "False" - - name: PGHOST - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: host - optional: false - - name: PGUSER - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: username - optional: false - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: password - optional: false - - name: PGDB - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: database - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: fence-dbcreds - key: dbcreated - optional: false - - name: DB - value: postgresql://$(PGUSER):$(PGPASSWORD)@$(PGHOST):5432/$(PGDB) - - name: INDEXD_PASSWORD - valueFrom: - secretKeyRef: - name: indexd-service-creds - key: fence - optional: true - - name: gen3Env - valueFrom: - configMapKeyRef: - name: manifest-global - key: hostname - - name: OPENSHIFT - value: "true" - service: - targetPort: 8080 - - podSecurityContext: - supplementalGroups: - - 1000 - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - image: - 
repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/fence" - tag: "latest" - pullPolicy: Always - USER_YAML: | - authz: - - resources: - - name: portal - description: data portal service - - name: 'services' - subresources: - - name: 'gearbox' - subresources: - - name: 'data-admin' - - name: 'data-manager' - - name: 'fence' - subresources: - - name: 'admin' - - policies: - - id: base_access - resource_paths: ['/portal'] - role_ids: ['base_access'] - - id: 'services.gearbox-data-manager' - description: 'admin access to most gearbox data endpoints' - resource_paths: - - /services/gearbox/data-manager - role_ids: - - 'data_manager' - - id: 'services.gearbox-data-admin' - description: 'admin access to gearbox data' - role_ids: - - 'data_admin' - resource_paths: - - '/services/gearbox/data-admin' - - id: 'services.fence-admin' - description: 'admin access to fence' - role_ids: - - 'fence_admin' - resource_paths: - - '/services/fence/admin' - - roles: - - id: fence_admin - permissions: - - id: fence_admin_permission - action: - service: 'fence' - method: '*' - - id: base_access - permissions: - - id: gearbox_access - action: - service: '*' - method: 'read' - - id: data_manager - permissions: - - id: gearbox_data_manager - action: - service: 'gearbox' - method: '*' - - id: data_admin - permissions: - - id: gearbox_data_admin - action: - service: '*' - method: '*' - users: - ### BEGIN INTERNS SECTION ### - ### END INTERNS SECTION ### - pmurdoch@uchicago.edu: - admin: true - policies: - - base_access - - services.gearbox-data-manager - - services.fence-admin - - services.gearbox-data-admin - -portal: - enabled: true - image: - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-fe" - tag: "latest" - pullPolicy: Always - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - limits: - cpu: 2.0 - memory: 4096Mi - # -- (map) The maximum amount of resources that the container is allowed to use - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". - type: ClusterIP - # -- (int) The port number that the service exposes. - port: 80 - targetPort: 8080 - gitops: - json: | - { - "s3_bucket": "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" - } - imagePullSecrets: - - name: ecr-pull-secret - podSecurityContext: - runAsNonRoot: true - runAsUser: 1000970000 - runAsGroup: 1000970000 - fsGroup: 1000970000 - gearboxS3Bucket: "https://test-compose-gearbox-data-bucket-with-versioning.s3.amazonaws.com" - -revproxy: - enabled: true - # -- (int) Number of replicas for the deployment. - replicaCount: 2 - nginx: - user: "nginx" - resolver: "dns-default.openshift-dns.svc.cluster.local" - pidFile: "/tmp/nginx.pid" - service: - # -- (string) Type of service. Valid values are "ClusterIP", "NodePort", "LoadBalancer", "ExternalName". 
- type: ClusterIP - targetPort: 8080 - image: - repository: quay.io/cdis/nginx - tag: 2025.09 - autoscaling: - averageCPUValue: 300m - averageMemoryValue: 256Mi - # -- (map) Resource requests and limits for the containers in the pod - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (string) The amount of memory requested - memory: 12Mi - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - - # OpenShift Route configuration (replaces Ingress on OpenShift) - openshiftRoute: - enabled: true - # Leave host empty to auto-generate, or specify your desired hostname - host: "gearbox-ped-gearbox-p1.apps.bsd-openshift-prod.bsd.uchicago.edu" # Auto-generates: -.apps. - annotations: - openshift.io/host.generated: "true" - -gearbox: - enabled: true - replicaCount: 2 - postgres: - password: "gearbox_thisisaweakpassword" - autoscaling: - averageCPUValue: 300m - averageMemoryValue: 256Mi - imagePullSecrets: - - name: ecr-pull-secret - podSecurityContext: - supplementalGroups: - - 1000 - image: - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-matching" - tag: "latest" - pullPolicy: Always - service: - targetPort: 8080 - volumes: - - name: config-volume - secret: - secretName: "gearbox-g3auto" - - name: gearbox-middleware-jwt-keys - secret: - secretName: "gearbox-middleware-jwt-keys" - items: - - key: jwt_public_key.pem - path: jwt_public_key.pem - optional: false - - name: nginx-config - configMap: - name: nginx-config - items: - - key: nginx.conf - path: nginx.conf - volumeMounts: - - name: "gearbox-middleware-jwt-keys" - readOnly: true - mountPath: "/gearbox/src/gearbox/keys/jwt_public_key.pem" - subPath: jwt_public_key.pem - - name: config-volume - readOnly: true - mountPath: /gearbox/.env - subPath: gearbox.env - - name: nginx-config - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - readOnly: true - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - env: - - name: GEN3_DEBUG - value: "False" - - name: DB_DATABASE - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: database - optional: false - - name: DB_HOST - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: host - optional: false - - name: DB_USER - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: username - optional: false - - name: ADMIN_LOGINS - valueFrom: - secretKeyRef: - name: gearbox-g3auto - key: base64Authz.txt - optional: true - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: password - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: dbcreated - optional: false - - name: OPENSHIFT - value: "true" -gearbox-middleware: - enabled: true - replicaCount: 2 - autoscaling: - averageCPUValue: 300m - averageMemoryValue: 256Mi - imagePullSecrets: - - name: 
ecr-pull-secret - podSecurityContext: - supplementalGroups: - - 1000 - gearboxMiddlewareG3auto: - testing: False - service: - targetPort: 8080 - image: - repository: "973342646972.dkr.ecr.us-east-1.amazonaws.com/openshift-pcdc/gearbox-middleware" - tag: "latest" - pullPolicy: Always - # -- (list) Volumes to attach to the container. - volumes: - - name: config-volume - secret: - secretName: "gearbox-middleware-g3auto" - - name: gearbox-middleware-jwt-keys - secret: - secretName: "gearbox-middleware-jwt-keys" - items: - - key: jwt_private_key.pem - path: jwt_private_key.pem - optional: false - - name: nginx-config - configMap: - name: nginx-config - items: - - key: nginx.conf - path: nginx.conf - volumeMounts: - - name: "gearbox-middleware-jwt-keys" - readOnly: true - mountPath: "/gearbox-middleware/gearbox_middleware/keys/jwt_private_key.pem" - subPath: jwt_private_key.pem - - name: config-volume - readOnly: true - mountPath: /gearbox-middleware/.env - subPath: gearbox-middleware.env - - name: nginx-config - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - readOnly: true - env: - - name: GEN3_DEBUG - value: "False" - - name: DB_DATABASE - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: database - optional: false - - name: DB_HOST - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: host - optional: false - - name: DB_USER - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: username - optional: false - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: password - optional: false - - name: DBREADY - valueFrom: - secretKeyRef: - name: gearbox-dbcreds - key: dbcreated - optional: false - - name: OPENSHIFT - value: "true" - resources: - # -- (map) The amount of resources that the container requests - requests: - # -- (string) The amount of memory requested - memory: 12Mi - # -- (string) The amount of CPU requested (e.g., 100m = 0.1 CPU) - cpu: 100m - # -- (map) The maximum amount of resources that the container is allowed to use - limits: - # -- (string) The maximum amount of memory the container can use - memory: 512Mi - # -- (string) The maximum amount of CPU the container can use - cpu: 500m - -postgresql: - enabled: true - image: - repository: bitnamilegacy/postgresql - readReplicas: - networkPolicy: - enabled: false - auth: - postgresPassword: "thisisaterriblepassword" - primary: - resources: - requests: - cpu: 250m # Change this value to adjust CPU requests, e.g., "500m" for 0.5 CPU - memory: 512Mi # Add or change this line to set memory requests - limits: - cpu: 1 # Add or change this line to set CPU limits, e.g., "1" for 1 CPU - memory: 1Gi # Add or change this line to set memory limits - networkPolicy: - enabled: false - persistence: - enabled: true - size: 1Gi -######################################################################################## -# DISABLED SERVICES # -######################################################################################## -elasticsearch: - enabled: false - -amanuensis: - enabled: false - -guppy: - enabled: false - -manifestservice: - enabled: false - -pcdcanalysistools: - enabled: false - -peregrine: - enabled: false - -sheepdog: - enabled: false - -sower: - enabled: false - -wts: - enabled: false - -ambassador: - # -- (bool) Whether to deploy the ambassador subchart. - enabled: false - -argo-wrapper: - # -- (bool) Whether to deploy the argo-wrapper subchart. - enabled: false - -audit: - # -- (bool) Whether to deploy the audit subchart. 
- enabled: false - -aws-es-proxy: - enabled: false - -metadata: - # -- (bool) Whether to deploy the metadata subchart. - enabled: false - -pidgin: - # -- (bool) Whether to deploy the pidgin subchart. - enabled: false - -indexd: - enabled: false - -hatchery: - enabled: false - -cohort-middleware: - enabled: false - -etl: - enabled: false - -gen3-network-policies: - enabled: false diff --git a/openshift.code-workspace b/openshift.code-workspace deleted file mode 100644 index a6d1847f4..000000000 --- a/openshift.code-workspace +++ /dev/null @@ -1,11 +0,0 @@ -{ - "folders": [ - { - "path": "." - }, - { - "path": "../fence" - } - ], - "settings": {} -} \ No newline at end of file diff --git a/pcdc-default-values.yaml b/pcdc-default-values.yaml deleted file mode 100644 index 0a5c9ac9f..000000000 --- a/pcdc-default-values.yaml +++ /dev/null @@ -1,1369 +0,0 @@ -global: - dev: true - hostname: localhost - portalApp: pcdc - dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json - authz_entity_name: "subject" - tierAccessLevel: "granular" - tierAccessLimit: "5" - postgres: - # -- (bool) Whether the database create job should run. - dbCreate: true - # -- (string) Name of external secret of the postgres master credentials. Disabled if empty - externalSecret: "" - master: - # -- global postgres master username - username: postgres - # -- global postgres master password - password: - # -- global postgres master host - host: - # -- global postgres master port - port: "5432" - tls: - cert: | - -----BEGIN CERTIFICATE----- - MIIDDTCCAfWgAwIBAgIQcMmHCSPIuchREDNi1OpQ5DANBgkqhkiG9w0BAQsFADAP - MQ0wCwYDVQQDEwRnZW4zMB4XDTI0MDMyNTIyMDgwNFoXDTI1MDMyNTIyMDgwNFow - FDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB - CgKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u6bgbztSg - 9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0bhfGlwmt/ - gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lHzPefEQoU - p4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e8rRg5KWA - N7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KKriN+7492 - 38Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABo2AwXjAOBgNVHQ8BAf8EBAMCBaAw - HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHwYD - VR0jBBgwFoAUIK7MtOCIs/DygzZ1/vR3ieAwplAwDQYJKoZIhvcNAQELBQADggEB - AIWgFxpLfZ9LUc90qdiC2yHaLKyTN/LMdkUjw82ULVLYJ0ot0JJaJvO4iDFe/Ra9 - t13LUhcE+W4ChentUHvwYKO5zghf4UtiryM8/wqmcZ03xyswdVaKuk5Ov39/9sTJ - 6rfzMpf3mJZDO6JYC475TCQQ3hKAUUzOiFg41CMeqAy9vn0zgBk58IzZmruvdn43 - YH6N/ooqVTj3CnkmVkWoB4zBjDzX9DuxpYvqI3seD7qLtXK2cm2X+Pqv90UoPsB/ - XegALjODFpTbN5Scvbpb3npXEKbvR7X9+xy7BbVYD2K0FQ9+S1UTU8Rz7Dh9SDHM - Ixy5W9o6gVFhB5mxceOxKNc= - -----END CERTIFICATE----- - key: | - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAoTT4Y1GJ5aTwA8emKdCiPL9RCOIALYygX9lmpQ6loSE5rS4u - 6bgbztSg9EV5mvV+oNA1g473rgwyjNlJEIQJvEO323okDjXI47j09N5a9aGhRc0b - hfGlwmt/gE6NZUuI/u9wjIh8nLysh4rOYrN2+Us9WRfBL8wjmRZWNFRygrEMB9lH - zPefEQoUp4BcOh4oLbu8//x3Of5fwDMitL5gQtSmZFnLMbK0HD7CJWNz9DQA1n9e - 8rRg5KWAN7Tu6MIaY/H/DVCL3LfswIBSF6eQ73umGHf1zhS7PLSHPxYxkEeg35KK - riN+749238Ra3bi0CzTey0It+EK+zjtt1fd7XQIDAQABAoIBAG+AhfWcZncSuHjE - 1SfDCRRfeBtexqS6ygrCAn9UPDfRFWW1tfkuwP00kQekLlKCT03H9dvDPYmDIkvN - 1v23dkxjwn3qY5n4lbT9L2FXky6m1zfCgLEKzY5GcHA85QwVTPKYhw6NMTPwRJ2T - 4uDeJQKVih9fkN4Qoua2TnXvmyzNU49ffgFMJ0Ec7vNmS7MCUtlGec1Y0xKgTflt - yqhChpG2MBxdX8tLNgSC+lgRBZSzRaP/0oGZuV3FQ7W4fuXLNN8CdhSzHbVlbK+D - CO1f6nqZ8PZKJ/7SGwB2Q05EqscNAF3tl1dsGpnLqOLpnqJ2+f/H4W6/uB2tAILd - ySaC53kCgYEAwOHrNulo7HLgSXEs4/Ta9nLPoZxpDxkNKCRENDJCsoakQ2O33aL4 - 
mrHdSmxGqecpoCvkifx/ZCN073ZykVIoSY70+N7f79EytGjkAzDxEAnYMKSU7SSJ - TGA+c8Juqh6uvbMuJy/ZiQE6DZsweqhxopov7xSd89RIvNaBZdXq3QcCgYEA1fWJ - VHCEeQGl6eMtSWOfiADUnmOG70xlvmpzlD18pTTCIF7V1rFaAXjJl0ldI3mASJy/ - usiHZq54bUWcvof8DjI7YJ0OS8e7pmUZK9+O9fGTLIf8TIz6qq0PfERk+SyWGdAo - Z8HQMJBKWX809KPkJ9isd62wfREHVazfljxdL3sCgYBwxKTsWdKKSy9uQMjqDcHm - zIEwD24s8YyLp4hoq+nqzmVDMQ3SevG2H78tP9ighRIFHyRiuEkSlthLGIkrBUmg - mAAJcOSkJT7r01dbtkV6BwdqiQ65Bt9u0+Yvb8GbnIy1RAj7yDH6s8jpI45YaBrn - 4hWcRgWDBN3x6ceFbmf+CQKBgA5vwNJnvSiFCfLcF0Qqaqs8kxwUzxf6aasNd7r6 - 4xwqkSap/3e7A72xrrh8hMJOAm/j07QAr9In14xX9BmPB1zV2tfMARjv4yN5Ete4 - /+ZsZGfOzSFFKey2PKM/4ihF7+LR/sfxdeCw+7NKOAKBxHVD029H0u69ZWdMgNGc - RRVdAoGAFH7huA61ylOGh/W6IMU0wvJqg9SeT53JoZTr++0MS+0sdYoRGrq4RzSZ - bXKuvqZaSrXMRB9JZ72FfpjwZhDPZtNOXJV00K4yjZIui6h+TPsDk4lnxVSPYMpP - My/zrtJTCPM+Gqa6mhYTz4fyITv7igxqyECakrCa/Ct0SVDZbSI= - -----END RSA PRIVATE KEY----- - -arborist: - enabled: true - #local Dev Only - postgres: - password: "arborist_thisisaweakpassword" - image: - repository: quay.io/pcdc/arborist - tag: "2025.07" - -amanuensis: - enabled: true - #local Dev Only - postgres: - password: "amanuensis_thisisaweakpassword" - image: - repository: "quay.io/pcdc/amanuensis" - tag: "2.26.2" - pullPolicy: IfNotPresent - -fence: - enabled: true - #local Dev Only - postgres: - password: "fence_thisisaweakpassword" - FENCE_CONFIG: - DEBUG: true - MOCK_STORAGE: true - #fill in - AMANUENSIS_PUBLIC_KEY_PATH: "/amanuensis/jwt_public_key.pem" - MOCK_GOOGLE_AUTH: true - mock_default_user: "test@example.com" - ENABLE_VISA_UPDATE_CRON: false - ENABLE_DELETE_EXPIRED_CLIENTS: false - ENABLE_FENCE_DEPLOYMENT: true - ENABLE_PRESIGNED_URL_FENCE: true - USER_YAML_ENABLED: true - #LOGIN_REDIRECT_WHITELIST: ["https://localhost:9443/", "http://localhost:9443/"] - podSecurityContext: {} - image: - repository: "quay.io/pcdc/fence" - tag: "helm-test" - pullPolicy: Always - # -- (list) Volumes to attach to the container. - volumes: - - name: old-config-volume - secret: - secretName: "fence-secret" - - name: json-secret-volume - secret: - secretName: "fence-json-secret" - optional: true - - name: creds-volume - secret: - secretName: "fence-creds" - - name: config-helper - configMap: - name: config-helper - optional: true - - name: logo-volume - configMap: - name: "logo-config" - - name: config-volume - secret: - secretName: "fence-config" - - name: fence-google-app-creds-secret-volume - secret: - secretName: "fence-google-app-creds-secret" - - name: fence-google-storage-creds-secret-volume - secret: - secretName: "fence-google-storage-creds-secret" - - name: fence-jwt-keys - secret: - secretName: "fence-jwt-keys" - - name: privacy-policy - configMap: - name: "privacy-policy" - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - - name: amanuensis-jwt-keys - secret: - secretName: "amanuensis-jwt-keys" - items: - - key: jwt_public_key.pem - path: jwt_public_key.pem - optional: false - - name: config-volume-public - configMap: - name: "manifest-fence" - optional: true - - # -- (list) Volumes to mount to the container. 
- volumeMounts: - - name: "old-config-volume" - readOnly: true - mountPath: "/var/www/fence/local_settings.py" - subPath: local_settings.py - - name: "json-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_credentials.json" - subPath: fence_credentials.json - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/fence/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/fence/config_helper.py" - subPath: config_helper.py - - name: "logo-volume" - readOnly: true - mountPath: "/fence/fence/static/img/logo.svg" - subPath: "logo.svg" - - name: "privacy-policy" - readOnly: true - mountPath: "/fence/fence/static/privacy_policy.md" - subPath: "privacy_policy.md" - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config-secret.yaml" - subPath: fence-config.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-google-app-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_app_creds_secret.json" - subPath: fence_google_app_creds_secret.json - - name: "fence-google-storage-creds-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" - subPath: fence_google_storage_creds_secret.json - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/keys/key/jwt_private_key.pem" - subPath: "jwt_private_key.pem" - - name: "amanuensis-jwt-keys" - readOnly: true - mountPath: "/amanuensis/jwt_public_key.pem" - subPath: "jwt_public_key.pem" - - name: "config-volume-public" - readOnly: true - mountPath: "/var/www/fence/fence-config-public.yaml" - subPath: fence-config-public.yaml - - USER_YAML: | - authz: - - resources: - - name: 'data_file' - description: 'data files, stored in S3' - - name: 'sower' - description: 'sower resource' - - name: workspace - description: jupyter notebooks - - name: analysis - description: analysis tool service - - name: portal - description: data portal service - - name: privacy - description: User privacy policy - - name: 'services' - subresources: - - name: 'sheepdog' - subresources: - - name: 'submission' - subresources: - - name: 'program' - - name: 'project' - - name: 'amanuensis' - - name: 'fence' - subresources: - - name: 'admin' - - name: programs - subresources: - - name: pcdc - - policies: - - id: 'data_upload' - description: 'upload raw data files to S3' - resource_paths: - - /data_file - role_ids: - - file_uploader - - id: 'services.amanuensis-admin' - description: 'admin access to amanuensis' - role_ids: - - 'amanuensis_admin' - resource_paths: - - '/services/amanuensis' - - id: 'services.fence-admin' - description: 'admin access to fence' - role_ids: - - 'fence_admin' - resource_paths: - - '/services/fence/admin' - - id: workspace - description: be able to use workspace - resource_paths: - - /workspace - role_ids: - - workspace_user - - id: analysis - description: be able to use analysis tool service - resource_paths: - - /analysis - role_ids: - - analysis_user - - id: privacy_policy - description: User agreed on the privacy policy - resource_paths: - - /privacy - role_ids: - - reader - - id: indexd_admin - description: full access to indexd API - role_ids: - - indexd_admin - resource_paths: - - /programs - - description: be able to use sower job - id: sower - resource_paths: [/sower] - role_ids: [sower_user] - - id: 'services.sheepdog-admin' - description: 'CRUD access to programs and projects' - role_ids: - - 
'sheepdog_admin' - resource_paths: - - '/services/sheepdog/submission/program' - - '/services/sheepdog/submission/project' - - id: all_programs_reader - role_ids: - - reader - - storage_reader - resource_paths: - - /programs - - id: login_no_access - role_ids: - - reader - resource_paths: - - /portal - - id: 'data_admin' - description: 'policy test, should write a policy per resource and assign to user in order to avoid duplicating policies' - role_ids: - - admin - resource_paths: - - /programs - - /programs/pcdc - - - roles: - - id: 'file_uploader' - description: 'can upload data files' - permissions: - - id: 'file_upload' - action: - service: 'fence' - method: 'file_upload' - - id: 'amanuensis_admin' - description: 'can do admin work on project/data request' - permissions: - - id: 'amanuensis_admin_action' - action: - service: 'amanuensis' - method: '*' - - id: 'fence_admin' - description: 'can use the admin endpoint in Fence' - permissions: - - id: 'fence_admin_permission' - action: - service: 'fence' - method: '*' - - id: workspace_user - permissions: - - action: {method: access, service: jupyterhub} - id: workspace_access - - id: sower_user - permissions: - - action: {method: access, service: job} - id: sower_access - - id: analysis_user - permissions: - - action: {method: access, service: analysis} - id: analysis_access - # Sheepdog admin role - - id: 'sheepdog_admin' - description: 'sheepdog admin role for program project crud' - permissions: - - id: 'sheepdog_admin_action' - action: - service: 'sheepdog' - method: '*' - - id: indexd_admin - description: full access to indexd API - permissions: - - id: indexd_admin - action: - service: indexd - method: '*' - - id: admin - permissions: - - id: admin - action: - service: '*' - method: '*' - - id: creator - permissions: - - id: creator - action: - service: '*' - method: create - - id: reader - permissions: - - id: reader - action: - service: '*' - method: read - - id: updater - permissions: - - id: updater - action: - service: '*' - method: update - - id: deleter - permissions: - - id: deleter - action: - service: '*' - method: delete - - id: storage_writer - permissions: - - id: storage_creator - action: - service: '*' - method: write-storage - - id: storage_reader - permissions: - - id: storage_reader - action: - service: '*' - method: read-storage - - users: - ### BEGIN INTERNS SECTION ### - ### END INTERNS SECTION ### - pmurdoch@uchicago.edu: - admin: true - policies: - - data_upload - - workspace - - services.sheepdog-admin - - services.amanuensis-admin - - data_admin - - analysis - - privacy_policy - - login_no_access - - sower - -guppy: - enabled: true - image: - repository: "quay.io/pcdc/guppy" - tag: "1.11.0" - pullPolicy: "IfNotPresent" - # image: - # repository: "guppy" - # tag: "test" - # pullPolicy: "Never" - authFilterField: "auth_resource_path" - -manifestservice: - enabled: false - image: - repository: quay.io/cdis/manifestservice - tag: "2025.01" - -pcdcanalysistools: - enabled: true - image: - repository: quay.io/pcdc/pcdcanalysistools - tag: "1.10.1" - -peregrine: - enabled: true - #local Dev Only - postgres: - password: "peregrine_thisisaweakpassword" - image: - repository: quay.io/pcdc/peregrine - tag: "1.4.1" - -portal: - enabled: true - image: - repository: "quay.io/pcdc/windmill" - tag: "1.43.0" - pullPolicy: IfNotPresent - resources: - requests: - cpu: 1.0 - gitops: - json: | - { - "gaTrackingId": "undefined", - "graphql": { - "boardCounts": [ - { - "graphql": "_person_count", - "name": "Person", - "plural": 
"Persons" - }, - { - "graphql": "_subject_count", - "name": "Subject", - "plural": "Subjects" - } - ], - "chartCounts": [ - { - "graphql": "_person_count", - "name": "Person" - }, - { - "graphql": "_subject_count", - "name": "Subject" - } - ], - "projectDetails": "boardCounts" - }, - "components": { - "appName": "Pediatric Cancer Data Commons Portal", - "index": { - "introduction": { - "heading": "Pediatric Cancer Data Commons", - "text": "The Pediatric Cancer Data Commons supports the management, analysis and sharing of data for the research community.", - "link": "/submission" - }, - "buttons": [ - { - "name": "Define Data Field", - "icon": "data-field-define", - "body": "The Pediatric Cancer Data Commons defines the data. Please study the dictionary before you start browsing.", - "link": "/DD", - "label": "Learn more" - }, - { - "name": "Explore Data", - "icon": "data-explore", - "body": "The Exploration Page gives you insights and a clear overview under selected factors.", - "link": "/explorer", - "label": "Explore data" - } - ], - "barChart": { - "showPercentage": true - } - }, - "navigation": { - "items": [ - { - "icon": "dictionary", - "link": "/DD", - "color": "#a2a2a2", - "name": "Dictionary" - }, - { - "icon": "exploration", - "link": "/explorer", - "color": "#a2a2a2", - "name": "Exploration" - } - ] - }, - "topBar": { - "items": [ - { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/pcdc/", - "name": "About PCDC" - }, - { - "icon": "external-link", - "leftOrientation": true, - "link": "https://commons.cri.uchicago.edu/sponsors/", - "name": "Our Sponsors" - } - ] - }, - "login": { - "title": "Pediatric Cancer Data Commons", - "subTitle": "Connect. Share. Cure.", - "text": "Welcome to the Pediatric Cancer Data Commons (PCDC), brought to you by Data for the Common Good (D4CG). Headquartered at the University of Chicago, D4CG works with international leaders to develop and apply uniform data standards that facilitate the collection, combination, and analysis of data from many different sources.\n\nThe PCDC harnesses pediatric, AYA, and adult cancer clinical data from around the world into a single unified platform, making it possible to explore and access data across multiple types of cancer. 
The PCDC Data Portal currently includes some of the world's largest sets of clinical data for pediatric neuroblastoma, soft tissue sarcoma, germ cell tumors, AML, and Hodgkin lymphoma, with the addition of more cancer types in progress.", - "contact": "If you have any questions about access or the registration process, please contact ", - "email": "pcdc_help@lists.uchicago.edu" - }, - "footerLogos": [ - { - "src": "/src/img/gen3.png", - "href": "https://ctds.uchicago.edu/gen3", - "alt": "Gen3 Data Commons", - "height": 40 - }, - { - "src": "/src/img/uchicago.png", - "href": "https://www.uchicago.edu/", - "alt": "The University of Chicago", - "height": 40 - } - ] - }, - "explorerConfig": [ - { - "id": 1, - "label": "data", - "charts": { - "sex": { - "chartType": "bar", - "title": "Sex" - }, - "race": { - "chartType": "bar", - "title": "Race" - }, - "ethnicity": { - "chartType": "bar", - "title": "Ethnicity" - }, - "consortium": { - "chartType": "bar", - "title": "Consortium" - } - }, - "filters": { - "anchor": { - "field": "disease_phase", - "options": ["Initial Diagnosis", "Relapse"], - "tabs": ["Disease", "Molecular", "Surgery", "Radiation", "Response", "SMN", "Imaging", "Labs", "SCT"] - }, - "tabs": [ - { - "title": "Subject", - "fields": [ - "consortium", - "data_contributor_id", - "studies.study_id", - "studies.treatment_arm", - "sex", - "race", - "ethnicity", - "year_at_disease_phase", - "survival_characteristics.lkss_obfuscated", - "censor_status", - "age_at_censor_status", - "medical_histories.medical_history", - "medical_histories.medical_history_status", - "external_references.external_resource_name", - "biospecimen_status" - ] - }, - { - "title": "Disease", - "fields": [ - "histologies.histology", - "histologies.histology_grade", - "histologies.histology_inpc", - "tumor_assessments.age_at_tumor_assessment", - "tumor_assessments.tumor_classification", - "tumor_assessments.tumor_site", - "tumor_assessments.tumor_state", - "tumor_assessments.longest_diam_dim1", - "tumor_assessments.depth", - "tumor_assessments.tumor_size", - "tumor_assessments.invasiveness", - "tumor_assessments.nodal_clinical", - "tumor_assessments.nodal_pathology", - "tumor_assessments.parameningeal_extension", - "tumor_assessments.necrosis", - "tumor_assessments.necrosis_pct", - "tumor_assessments.tumor_laterality", - "stagings.irs_group", - "stagings.tnm_finding", - "stagings.stage_system", - "stagings.stage", - "stagings.AB", - "stagings.E", - "stagings.S", - "disease_characteristics.mki", - "disease_characteristics.bulk_disease", - "disease_characteristics.BULK_MED_MASS", - "disease_characteristics.bulky_nodal_aggregate", - "disease_characteristics.who_aml", - "disease_characteristics.CNS_disease_status", - "disease_characteristics.MLDS" - ] - }, - { - "title": "Molecular", - "fields": [ - "molecular_analysis.anaplasia", - "molecular_analysis.anaplasia_extent", - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result", - "molecular_analysis.gene1", - "molecular_analysis.gene2", - "molecular_analysis.dna_index", - "molecular_analysis.age_at_molecular_analysis", - "molecular_analysis.mitoses", - "molecular_analysis.cytodifferentiation" - ] - }, - { - "title": "Surgery", - "fields": [ - "biopsy_surgical_procedures.tumor_classification", - "biopsy_surgical_procedures.procedure_type", - "biopsy_surgical_procedures.margins" - ] - }, - { - "title": "Radiation", - "fields": [ - "radiation_therapies.tumor_classification", - "radiation_therapies.energy_type", - 
"radiation_therapies.rt_dose" - ] - }, - { - "title": "Response", - "fields": [ - "subject_responses.tx_prior_response", - "subject_responses.response", - "subject_responses.interim_response", - "subject_responses.response_method", - "minimal_residual_diseases.mrd_result", - "minimal_residual_diseases.mrd_result_numeric", - "minimal_residual_diseases.mrd_result_unit", - "minimal_residual_diseases.mrd_method", - "minimal_residual_diseases.mrd_sample_source" - ] - }, - { - "title": "SMN", - "fields": [ - "secondary_malignant_neoplasm.age_at_smn", - "secondary_malignant_neoplasm.smn_site", - "secondary_malignant_neoplasm.smn_type", - "secondary_malignant_neoplasm.smn_yn", - "secondary_malignant_neoplasm.smn_morph_icdo" - ] - }, - { - "title": "Imaging", - "fields": [ - "imagings.imaging_method", - "imagings.imaging_result" - ] - }, - { - "title": "Labs", - "fields": [ - "labs.lab_test", - "labs.lab_result", - "labs.lab_result_numeric", - "labs.lab_result_unit" - ] - }, - { - "title": "SCT", - "fields": [ - "stem_cell_transplants.sct_type", - "stem_cell_transplants.sct_source", - "stem_cell_transplants.sct_donor_relationship" - ] - } - ], - "unitCalcConfig": { - "ageUnits": { - "quantity": "age", - "desiredUnit": "days", - "selectUnits": { "months": 30, "years": 365.25 } - }, - "calculatorMapping": { - "number": [ - "year_at_disease_phase", - "tumor_assessments.longest_diam_dim1", - "radiation_therapies.rt_dose", - "tumor_assessments.necrosis_pct", - "labs.lab_result_numeric" - ], - "age": [ - "age_at_censor_status", - "tumor_assessments.age_at_tumor_assessment", - "molecular_analysis.age_at_molecular_analysis", - "secondary_malignant_neoplasm.age_at_smn", - "radiation_therapies.age_at_rt_start", - "subject_responses.age_at_response" - ] - } - }, - "filterDependencyConfig": { - "relations": { - "molecular_abnormality": [ - "molecular_analysis.molecular_abnormality", - "molecular_analysis.molecular_abnormality_result" - ], - "tumor_site_state": [ - "tumor_assessments.tumor_state", - "tumor_assessments.tumor_site" - ], - "stage": ["stagings.stage_system", "stagings.stage"], - "mrd_result": [ - "minimal_residual_diseases.mrd_result_numeric", - "minimal_residual_diseases.mrd_result_unit" - ], - "lab_result": ["labs.lab_result_numeric", "labs.lab_result_unit"] - }, - "filterToRelation": { - "molecular_analysis.molecular_abnormality": "molecular_abnormality", - "molecular_analysis.molecular_abnormality_result": "molecular_abnormality", - "tumor_assessments.tumor_state": "tumor_site_state", - "tumor_assessments.tumor_site": "tumor_site_state", - "stagings.stage_system": "stage", - "stagings.stage": "stage", - "minimal_residual_diseases.mrd_result_numeric": "mrd_result", - "minimal_residual_diseases.mrd_result_unit": "mrd_result", - "labs.lab_result_numeric": "lab_result", - "labs.lab_result_unit": "lab_result" - } - } - }, - "projectId": "search", - "graphqlField": "subject", - "index": "", - "buttons": [ - { - "enabled": true, - "type": "export-to-pfb", - "title": "Export to PFB", - "leftIcon": "datafile", - "rightIcon": "download" - }, - { - "enabled": false, - "type": "data", - "title": "Download Data", - "leftIcon": "user", - "rightIcon": "download", - "fileName": "data.json", - "tooltipText": "You can only download data accessible to you" - } - ], - "table": { - "enabled": false, - "fields": [ - "external_references.external_links", - "consortium", - "data_contributor_id", - "subject_submitter_id", - "sex", - "race", - "ethnicity", - "survival_characteristics.lkss", - 
"survival_characteristics.age_at_lkss" - ] - }, - "patientIds": { - "filter": false, - "export": true, - "filterName": "subject_submitter_id", - "displayName": "Subject Submitter Ids" - }, - "survivalAnalysis": { - "result": { - "pval": false, - "risktable": true, - "survival": true - } - }, - "guppyConfig": { - "dataType": "subject", - "nodeCountTitle": "Subjects", - "fieldMapping": [ - { - "field": "data_contributor_id", - "name": "Data Contributor", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.study_id", - "name": "Study Id", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "studies.treatment_arm", - "name": "Treatment Arm", - "tooltip": "Survival analysis is not allowed for this variable. Filtering by this variable will disable survival analysis." - }, - { - "field": "year_at_disease_phase", - "name": "Year at Initial Diagnosis" - }, - { - "field": "survival_characteristics.lkss", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "survival_characteristics.lkss_obfuscated", - "name": "Last Known Survival Status (LKSS)" - }, - { - "field": "age_at_censor_status", - "name": "Age At Censor Status (days)" - }, - { - "field": "medical_histories.medical_history", - "name": "Medical History" - }, - { - "field": "medical_histories.medical_history_status", - "name": "Medical History Status" - }, - { - "field": "external_references.external_resource_name", - "name": "External Resource Name" - }, - { - "field": "biospecimen_status", - "name": "Biospecimen Availability" - }, - { - "field": "histologies.histology", - "name": "Histology" - }, - { - "field": "histologies.histology_grade", - "name": "Histology Grade" - }, - { - "field": "histologies.histology_inpc", - "name": "INPC Classification" - }, - { - "field": "tumor_assessments.age_at_tumor_assessment", - "name": "Age at Tumor Assessment (days)" - }, - { - "field": "tumor_assessments.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "tumor_assessments.tumor_site", - "name": "Tumor Site" - }, - { - "field": "tumor_assessments.tumor_state", - "name": "Tumor State" - }, - { - "field": "tumor_assessments.longest_diam_dim1", - "name": "Longest Diameter Dimension 1" - }, - { - "field": "tumor_assessments.depth", - "name": "Tumor Depth" - }, - { - "field": "tumor_assessments.tumor_size", - "name": "Tumor Size" - }, - { - "field": "tumor_assessments.invasiveness", - "name": "Invasiveness" - }, - { - "field": "tumor_assessments.nodal_clinical", - "name": "Nodal Clinical" - }, - { - "field": "tumor_assessments.nodal_pathology", - "name": "Nodal Pathology" - }, - { - "field": "tumor_assessments.parameningeal_extension", - "name": "Parameningeal Extension" - }, - { - "field": "tumor_assessments.necrosis", - "name": "Necrosis" - }, - { - "field": "tumor_assessments.necrosis_pct", - "name": "Necrosis PCT" - }, - { - "field": "tumor_assessments.tumor_laterality", - "name": "Tumor Laterality" - }, - { - "field": "stagings.irs_group", - "name": "IRS Group" - }, - { - "field": "stagings.tnm_finding", - "name": "TNM Finding" - }, - { - "field": "stagings.stage_system", - "name": "Stage System" - }, - { - "field": "stagings.stage", - "name": "Stage" - }, - { - "field": "stagings.AB", - "name": "Ann Arbor AB" - }, - { - "field": "stagings.E", - "name": "Ann Arbor E" - }, - { - "field": "stagings.S", - "name": "Ann Arbor 
S" - }, - { - "field": "disease_characteristics.mki", - "name": "MKI" - }, - { - "field": "disease_characteristics.bulk_disease", - "name": "Bulky Disease" - }, - { - "field": "disease_characteristics.BULK_MED_MASS", - "name": "Bulky Mediastinal Mass" - }, - { - "field": "disease_characteristics.bulky_nodal_aggregate", - "name": "Bulky Nodal Aggregate" - }, - { - "field": "disease_characteristics.who_aml", - "name": "WHO AML" - }, - { - "field": "disease_characteristics.CNS_disease_status", - "name": "CNS Disease Status" - }, - { - "field": "disease_characteristics.MLDS", - "name": "MLDS" - }, - { - "field": "molecular_analysis.anaplasia", - "name": "Anaplasia" - }, - { - "field": "molecular_analysis.anaplasia_extent", - "name": "Anaplasia Extent" - }, - { - "field": "molecular_analysis.molecular_abnormality", - "name": "Molecular Abnormality" - }, - { - "field": "molecular_analysis.molecular_abnormality_result", - "name": "Molecular Abnormality Result" - }, - { - "field": "molecular_analysis.gene1", - "name": "Gene 1" - }, - { - "field": "molecular_analysis.gene2", - "name": "Gene 2" - }, - { - "field": "molecular_analysis.dna_index", - "name": "DNA Index" - }, - { - "field": "molecular_analysis.age_at_molecular_analysis", - "name": "Age at Molecular Analysis (days)" - }, - { - "field": "molecular_analysis.mitoses", - "name": "Mitoses" - }, - { - "field": "molecular_analysis.cytodifferentiation", - "name": "Cytodifferentiation" - }, - { - "field": "biopsy_surgical_procedures.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "biopsy_surgical_procedures.procedure_type", - "name": "Procedure Type" - }, - { - "field": "biopsy_surgical_procedures.procedure_site", - "name": "Procedure Site" - }, - { - "field": "biopsy_surgical_procedures.margins", - "name": "Margins" - }, - { - "field": "radiation_therapies.tumor_classification", - "name": "Tumor Classification" - }, - { - "field": "radiation_therapies.age_at_rt_start", - "name": "Age at Radiation Therapy (days)" - }, - { - "field": "radiation_therapies.rt_site", - "name": "Radiation Site" - }, - { - "field": "radiation_therapies.energy_type", - "name": "Energy Type" - }, - { - "field": "radiation_therapies.rt_dose", - "name": "Radiation Dose" - }, - { - "field": "radiation_therapies.rt_unit", - "name": "Radiation Unit" - }, - { - "field": "subject_responses.age_at_response", - "name": "Age at Response (days)" - }, - { - "field": "subject_responses.tx_prior_response", - "name": "Treatment Prior Response" - }, - { - "field": "subject_responses.response", - "name": "Response" - }, - { - "field": "subject_responses.interim_response", - "name": "Interim Response" - }, - { - "field": "subject_responses.response_method", - "name": "Response Method" - }, - { - "field": "minimal_residual_diseases.mrd_result", - "name": "MRD Result" - }, - { - "field": "minimal_residual_diseases.mrd_result_numeric", - "name": "MRD Result Numeric" - }, - { - "field": "minimal_residual_diseases.mrd_result_unit", - "name": "MRD Result Unit" - }, - { - "field": "minimal_residual_diseases.mrd_method", - "name": "MRD Method" - }, - { - "field": "minimal_residual_diseases.mrd_sample_source", - "name": "MRD Sample Source" - }, - { - "field": "subject_responses.necrosis", - "name": "Necrosis" - }, - { - "field": "secondary_malignant_neoplasm.age_at_smn", - "name": "Age at SMN (days)" - }, - { - "field": "secondary_malignant_neoplasm.smn_site", - "name": "SMN Site" - }, - { - "field": "secondary_malignant_neoplasm.smn_type", - "name": "SMN Type" - }, - { 
- "field": "secondary_malignant_neoplasm.smn_yn", - "name": "Secondary Malignancy" - }, - { - "field": "secondary_malignant_neoplasm.smn_morph_icdo", - "name": "ICD-O Morphology" - }, - { - "field": "imagings.imaging_method", - "name": "Imaging Method" - }, - { - "field": "imagings.imaging_result", - "name": "Imaging Result" - }, - { - "field": "labs.lab_result_numeric", - "name": "Numeric Lab Result" - }, - { - "field": "labs.lab_result_unit", - "name": "Lab Result Unit" - }, - { - "field": "labs.lab_result", - "name": "Lab Result" - }, - { - "field": "labs.lab_test", - "name": "Lab Test" - }, - { - "field": "stem_cell_transplants.sct_type", - "name": "SCT Type" - }, - { - "field": "stem_cell_transplants.sct_source", - "name": "SCT Source" - }, - { - "field": "stem_cell_transplants.sct_donor_relationship", - "name": "SCT Donor Relationship" - } - ] - }, - "dataRequests": { - "enabled": false - }, - "getAccessButtonLink": "https://docs.pedscommons.org/PCDCProjectRequestForm/" - } - ] - } - -revproxy: - enabled: true - image: - repository: quay.io/cdis/nginx - tag: "2025.08" - -sheepdog: - enabled: true - #local Dev Only - postgres: - password: "sheepdog_thisisaweakpassword" - dictionaryUrl: https://pcdc-staging-dictionaries.s3.amazonaws.com/pcdc-schema-staging-20250114.json - image: - repository: quay.io/pcdc/sheepdog - tag: "1.7.2" - -sower: - enabled: false - image: - repository: quay.io/cdis/sower - tag: "2025.01" - pelican: - bucket: "gen3-helm-pelican-export" - # -- (map) Secret information for Usersync and External Secrets. - - sowerConfig: - - name: pelican-export - action: export - container: - name: job-task - image: quay.io/pcdc/pelican:1.3.3_export - pull_policy: Always - env: - - name: DICTIONARY_URL - valueFrom: - configMapKeyRef: - name: manifest-global - key: dictionary_url - - name: GEN3_HOSTNAME - valueFrom: - configMapKeyRef: - name: manifest-global - key: hostname - - name: ROOT_NODE - value: subject - - name: OUTPUT_FILE_FORMAT - value: ZIP - volumeMounts: - - name: pelican-creds-volume - readOnly: true - mountPath: "/pelican-creds.json" - subPath: config.json - - name: peregrine-creds-volume - readOnly: true - mountPath: "/peregrine-creds.json" - subPath: creds.json - cpu-limit: "1" - memory-limit: 2Gi - volumes: - - name: pelican-creds-volume - secret: - secretName: pelicanservice-g3auto - - name: peregrine-creds-volume - secret: - secretName: peregrine-creds - restart_policy: Never - -wts: - enabled: false - image: - repository: quay.io/cdis/workspace-token-service - tag: 2025.01 - fenceImage: - repository: "quay.io/pcdc/fence" - tag: "helm-test" - pullPolicy: Always - -postgresql: - image: - repository: bitnamilegacy/postgresql - #local Dev Only - auth: - postgresPassword: "thisisaterriblepassword" - primary: - persistence: - enabled: true - size: 5Gi - -elasticsearch: - enabled: true - clusterName: gen3-elasticsearch - maxUnavailable: 0 - singleNode: true - replicas: 1 - clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" - esConfig: - elasticsearch.yml: | - # Here we can add elasticsearch config - volumeClaimTemplate: - resources: - requests: - storage: 5Gi - - resources: - requests: - cpu: 0.5 - memory: 500Mi - limits: - cpu: 1 - memory: 2Gi -######################################################################################## -# DISABLED SERVICES # -######################################################################################## - -ambassador: - # -- (bool) Whether to deploy the ambassador subchart. 
  enabled: false

argo-wrapper:
  # -- (bool) Whether to deploy the argo-wrapper subchart.
  enabled: false

audit:
  # -- (bool) Whether to deploy the audit subchart.
  enabled: false

aws-es-proxy:
  # -- (bool) Whether to deploy the aws-es-proxy subchart.
  enabled: false

metadata:
  # -- (bool) Whether to deploy the metadata subchart.
  enabled: false

pidgin:
  # -- (bool) Whether to deploy the pidgin subchart.
  enabled: false

indexd:
  # -- (bool) Whether to deploy the indexd subchart.
  enabled: false

hatchery:
  # -- (bool) Whether to deploy the hatchery subchart.
  enabled: false

gearbox:
  # -- (bool) Whether to deploy the gearbox subchart.
  enabled: false

gearbox-middleware:
  # -- (bool) Whether to deploy the gearbox-middleware subchart.
  enabled: false

cohort-middleware:
  # -- (bool) Whether to deploy the cohort-middleware subchart.
  enabled: false
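
For reference, the sketch below shows how a later overlay could opt one of the disabled services above back in without editing this base file, relying on Helm's behaviour that values files passed with additional -f flags override earlier ones. It assumes the indexd subchart accepts the same enabled/image value layout used by revproxy, sheepdog, and wts in this file; the overlay file name, image tag, and repository pin are illustrative placeholders rather than values taken from this patch.

# values.indexd.yaml -- hypothetical overlay applied after values.yaml, e.g.
#   helm upgrade --install <release> <chart> -f values.yaml -f values.indexd.yaml
# Only the keys being changed need to appear here; everything else keeps the
# defaults defined above.
indexd:
  enabled: true
  image:
    repository: quay.io/cdis/indexd   # assumed to follow the image layout of the other subcharts
    tag: "2025.01"                    # placeholder tag, not taken from this patch

Keeping the long tail of optional subcharts disabled in the base file and re-enabling them from small overlays like this keeps per-environment diffs limited to the services that actually change.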