diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 000000000..a51f0722b Binary files /dev/null and b/.DS_Store differ diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..dcc0939db --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +# Ignore Mac system files +.DS_store +*.idea +.idea/* \ No newline at end of file diff --git a/ansible/OpenPedCan-build.yml b/ansible/OpenPedCan-build.yml new file mode 100644 index 000000000..c49a8732b --- /dev/null +++ b/ansible/OpenPedCan-build.yml @@ -0,0 +1,12 @@ +--- +- name: OpenPedCan cicd pipeline + hosts: OpenPedCan-api + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: OpenPedCan + tasks_from: build \ No newline at end of file diff --git a/ansible/OpenPedCan-deploy.yml b/ansible/OpenPedCan-deploy.yml new file mode 100644 index 000000000..5189e3305 --- /dev/null +++ b/ansible/OpenPedCan-deploy.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of OpenPedCan-api pipeline + hosts: OpenPedCan-api + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: OpenPedCan + tasks_from: deploy \ No newline at end of file diff --git a/ansible/agent-setup-icdc.yml b/ansible/agent-setup-icdc.yml new file mode 100644 index 000000000..0e1325d2e --- /dev/null +++ b/ansible/agent-setup-icdc.yml @@ -0,0 +1,9 @@ +--- +- name: setup sumologic and newrelic agents + hosts: agent_setup + become: yes + + roles: + - sumologic-icdc + - sumologic-journalctl + - newrelic-icdc \ No newline at end of file diff --git a/ansible/agent-setup.yml b/ansible/agent-setup.yml new file mode 100644 index 000000000..a944549a8 --- /dev/null +++ b/ansible/agent-setup.yml @@ -0,0 +1,9 @@ +--- +- name: setup sumologic and newrelic agents + hosts: agent_setup + become: yes + + roles: + - sumologic + - sumologic-journalctl + - newrelic \ No newline at end of file diff --git a/ansible/alb-ops.yml b/ansible/alb-ops.yml new file mode 100644 index 
000000000..f55a740b2 --- /dev/null +++ b/ansible/alb-ops.yml @@ -0,0 +1,38 @@ +--- +- name: Add/Remove alb rules + hosts: docker-local + connection: local + gather_facts: yes + + tasks: + - name: get alb info + include_role: + name: alb-ops + tasks_from: info + when: action == "create" + + - name: get alb info + include_role: + name: alb-ops + tasks_from: rules + loop: + - name: fixed + priority: 3 + listener_arn: "{{https_listener_arn}}" + - name: frontend + priority: 2 + listener_arn: "{{https_listener_arn}}" + - name: backend + priority: 1 + listener_arn: "{{https_listener_arn}}" + when: action == "create" + + - name: remove alb rule + include_role: + name: alb-ops + tasks_from: remove + loop: + - fixed + - frontend + - backend + when: action == "remove" diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg index dc7559a21..a7cf19f4c 100644 --- a/ansible/ansible.cfg +++ b/ansible/ansible.cfg @@ -1,4 +1,9 @@ -[defaults] +[defaults] validate_certs = no +host_key_checking = False ansible_server_cert_validation = no -inventory = ./hosts \ No newline at end of file +inventory = ./hosts + + + + diff --git a/ansible/apm-integrations.yml b/ansible/apm-integrations.yml new file mode 100644 index 000000000..73ef74c2c --- /dev/null +++ b/ansible/apm-integrations.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: "{{tier}}" + gather_facts: yes + + tasks: + - debug: + msg: "{{groups}}" + - name: deploy new relic apm integrations + include_role: + name: newrelic-apm-integration + \ No newline at end of file diff --git a/ansible/bento-build.yml b/ansible/bento-build.yml new file mode 100644 index 000000000..4bc40e597 --- /dev/null +++ b/ansible/bento-build.yml @@ -0,0 +1,13 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: bento + tasks_from: build + vars_from: bento \ No newline at end of file diff --git 
a/ansible/bento-ctdc-data-loader.yml b/ansible/bento-ctdc-data-loader.yml new file mode 100644 index 000000000..ccc40d0f5 --- /dev/null +++ b/ansible/bento-ctdc-data-loader.yml @@ -0,0 +1,17 @@ +--- +- name: load data to neo4j db + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: ctdc-data-loader + + vars: + + data_bucket: bento-ctdc-metadata + ansible_python_interpreter: /usr/bin/python2.7 \ No newline at end of file diff --git a/ansible/bento-data-loader.yml b/ansible/bento-data-loader.yml new file mode 100644 index 000000000..c187b72b4 --- /dev/null +++ b/ansible/bento-data-loader.yml @@ -0,0 +1,14 @@ +--- +- name: load data to neo4j db + hosts: all + connection: local + #become: yes + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: bento-data-loader + vars: + ansible_python_interpreter: /usr/bin/python2.7 \ No newline at end of file diff --git a/ansible/bento-deploy-ctdc.yml b/ansible/bento-deploy-ctdc.yml new file mode 100644 index 000000000..5dd54d7a8 --- /dev/null +++ b/ansible/bento-deploy-ctdc.yml @@ -0,0 +1,13 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento + tasks_from: deploy-ctdc + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/bento-deploy-dev.yml b/ansible/bento-deploy-dev.yml new file mode 100644 index 000000000..17fec3d37 --- /dev/null +++ b/ansible/bento-deploy-dev.yml @@ -0,0 +1,13 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento + tasks_from: deploy-dev + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/bento-deploy.yml b/ansible/bento-deploy.yml new 
file mode 100644 index 000000000..7c52cbc6a --- /dev/null +++ b/ansible/bento-deploy.yml @@ -0,0 +1,14 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento + tasks_from: deploy + vars_from: bento + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/bento-docs.yml b/ansible/bento-docs.yml new file mode 100644 index 000000000..b1b031a41 --- /dev/null +++ b/ansible/bento-docs.yml @@ -0,0 +1,11 @@ +--- +- name: deploy latest vwersion of Bento documentation + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy docs + include_role: + name: bento-docs + tasks_from: build \ No newline at end of file diff --git a/ansible/bento-es-loader.yml b/ansible/bento-es-loader.yml new file mode 100644 index 000000000..f857a294c --- /dev/null +++ b/ansible/bento-es-loader.yml @@ -0,0 +1,14 @@ +--- +- name: load data to elasticsearch + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: bento-es-loader + vars: + ansible_python_interpreter: /usr/bin/python2.7 \ No newline at end of file diff --git a/ansible/bento-git-tag.yml b/ansible/bento-git-tag.yml new file mode 100644 index 000000000..b0114bbd4 --- /dev/null +++ b/ansible/bento-git-tag.yml @@ -0,0 +1,21 @@ +- name: Tag bento repository daily + + hosts: localhost + connection: local + + vars: + retention: 30 + date_regex: '([12]\d{3}_(0[1-9]|1[0-2])_(0[1-9]|[12]\d|3[01])_(0[1-9]|1[0-2])_([0-5]\d))' + current_date: "{{ lookup('pipe','date +%Y_%m_%d_%H_%M') }}" + git_tag: "{{current_date}}" + git_home: "{{ ansible_env.HOME }}/git-tag" + + vars_files: + - config/icdc-env-vars.yaml + + tasks: + - name: tag repository + include_role: + name: git-tag + vars: + git_url: https://{{git_username}}:{{git_password}}@github.com/CBIIT/{{ branch }} \ No newline 
at end of file diff --git a/ansible/build-bento-ccdi.yml b/ansible/build-bento-ccdi.yml new file mode 100644 index 000000000..f8a06ee4c --- /dev/null +++ b/ansible/build-bento-ccdi.yml @@ -0,0 +1,12 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: bento-ccdi + tasks_from: build \ No newline at end of file diff --git a/ansible/build-bento-ctdc.yml b/ansible/build-bento-ctdc.yml new file mode 100644 index 000000000..6cf0c0126 --- /dev/null +++ b/ansible/build-bento-ctdc.yml @@ -0,0 +1,12 @@ +--- +- name: ctdc cicd pipeline + hosts: ctdc-local + gather_facts: no + connection: local + + + tasks: + - name: build bento-ctdc application + include_role: + name: bento-ctdc + tasks_from: build \ No newline at end of file diff --git a/ansible/build-bento-demo.yml b/ansible/build-bento-demo.yml new file mode 100644 index 000000000..51de6c452 --- /dev/null +++ b/ansible/build-bento-demo.yml @@ -0,0 +1,11 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: This is a build stage + include_role: + name: bento-demo + tasks_from: build \ No newline at end of file diff --git a/ansible/build-bento-file-downloader.yml b/ansible/build-bento-file-downloader.yml new file mode 100644 index 000000000..7858cd344 --- /dev/null +++ b/ansible/build-bento-file-downloader.yml @@ -0,0 +1,12 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: bento-file-downloader + tasks_from: build \ No newline at end of file diff --git a/ansible/build-bento-gke.yml b/ansible/build-bento-gke.yml new file mode 100644 index 000000000..1ac0d1ea6 --- /dev/null +++ b/ansible/build-bento-gke.yml @@ -0,0 +1,11 @@ +--- +- name: bento cicd pipeline + hosts: gke + connection: local + gather_facts: yes + + tasks: + - name: 
This is a build stage + include_role: + name: bento-gke + tasks_from: build \ No newline at end of file diff --git a/ansible/build-bento-icdc.yml b/ansible/build-bento-icdc.yml new file mode 100644 index 000000000..9c14d5695 --- /dev/null +++ b/ansible/build-bento-icdc.yml @@ -0,0 +1,12 @@ +--- +- name: icdc cicd pipeline + hosts: icdc-local + gather_facts: no + connection: local + + + tasks: + - name: build bento-icdc application + include_role: + name: bento-icdc + tasks_from: build \ No newline at end of file diff --git a/ansible/build-bento-ins.yml b/ansible/build-bento-ins.yml new file mode 100644 index 000000000..86b5bb6f0 --- /dev/null +++ b/ansible/build-bento-ins.yml @@ -0,0 +1,12 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: bento-ins + tasks_from: build \ No newline at end of file diff --git a/ansible/build-cloud-run.yml b/ansible/build-cloud-run.yml new file mode 100644 index 000000000..96a0ed904 --- /dev/null +++ b/ansible/build-cloud-run.yml @@ -0,0 +1,11 @@ +--- +- name: bento cicd pipeline + hosts: gke + connection: local + gather_facts: yes + + tasks: + - name: This is a build stage + include_role: + name: bento-cloud-run + tasks_from: build \ No newline at end of file diff --git a/ansible/build-cloudone-ctdc.yml b/ansible/build-cloudone-ctdc.yml new file mode 100644 index 000000000..23687d98d --- /dev/null +++ b/ansible/build-cloudone-ctdc.yml @@ -0,0 +1,12 @@ +--- +- name: icdc cicd pipeline + hosts: ctdc-local + gather_facts: no + connection: local + + + tasks: + - name: build ctdc application + include_role: + name: bento-ctdc + tasks_from: build \ No newline at end of file diff --git a/ansible/build-cloudone-ecs.yml b/ansible/build-cloudone-ecs.yml new file mode 100644 index 000000000..a5a965364 --- /dev/null +++ b/ansible/build-cloudone-ecs.yml @@ -0,0 +1,11 @@ +--- +- name: cloudone-ecs cicd pipeline + hosts: cicd + 
connection: local + gather_facts: yes + + tasks: + - name: This is a build stage + include_role: + name: cloudone-ecs + tasks_from: build \ No newline at end of file diff --git a/ansible/build-icdc-data-dictionary.yml b/ansible/build-icdc-data-dictionary.yml new file mode 100644 index 000000000..faf9d9254 --- /dev/null +++ b/ansible/build-icdc-data-dictionary.yml @@ -0,0 +1,11 @@ +--- +- name: icdc cicd pipeline + hosts: icdc-local + gather_facts: no + connection: local + + tasks: + - name: This is a build stage + include_role: + name: icdc-data-dictionary + tasks_from: build \ No newline at end of file diff --git a/ansible/build-icdc-file-downloader.yml b/ansible/build-icdc-file-downloader.yml new file mode 100644 index 000000000..03d47c8ce --- /dev/null +++ b/ansible/build-icdc-file-downloader.yml @@ -0,0 +1,11 @@ +--- +- name: icdc cicd pipeline + hosts: icdc-local + gather_facts: no + connection: local + + tasks: + - name: This is a build stage + include_role: + name: icdc-file-downloader + tasks_from: build \ No newline at end of file diff --git a/ansible/build-image-bento.yml b/ansible/build-image-bento.yml new file mode 100644 index 000000000..994fb750f --- /dev/null +++ b/ansible/build-image-bento.yml @@ -0,0 +1,13 @@ +--- +- name: build bento base images + hosts: all + connection: local + #become: yes + gather_facts: yes + + tasks: + - name: build bento image + include_role: + name: build-image-bento + tasks_from: bento + vars_from: bento \ No newline at end of file diff --git a/ansible/build-image-icdc.yml b/ansible/build-image-icdc.yml new file mode 100644 index 000000000..ffd321d41 --- /dev/null +++ b/ansible/build-image-icdc.yml @@ -0,0 +1,14 @@ +--- +- name: build icdc base images + hosts: localhost + connection: local + become_user: commonsdocker + + vars_files: + config/icdc-env-vars.yaml + + tasks: + - name: build ICDC image + include_role: + name: build-image-bento + tasks_from: icdc diff --git a/ansible/build-image.yml b/ansible/build-image.yml 
new file mode 100644 index 000000000..d91e37811 --- /dev/null +++ b/ansible/build-image.yml @@ -0,0 +1,11 @@ +--- +- name: build docker images + hosts: localhost + connection: local + become_user: commonsdocker + + vars_files: + config/icdc-env-vars.yaml + + roles: + - build-image diff --git a/ansible/build-open-target-backend.yml b/ansible/build-open-target-backend.yml new file mode 100644 index 000000000..3cb0ba6fc --- /dev/null +++ b/ansible/build-open-target-backend.yml @@ -0,0 +1,11 @@ +--- +- name: ppdc cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + tasks: + - name: This is a build stage open target backend + include_role: + name: open-target-backend + tasks_from: build \ No newline at end of file diff --git a/ansible/build-ppdc-otg.yml b/ansible/build-ppdc-otg.yml new file mode 100644 index 000000000..b9e39aa2c --- /dev/null +++ b/ansible/build-ppdc-otg.yml @@ -0,0 +1,11 @@ +--- +- name: bento cicd pipeline + hosts: ppdc-otg + connection: local + gather_facts: no + + tasks: + - name: This is a build stage + include_role: + name: ppdc-otg + tasks_from: build \ No newline at end of file diff --git a/ansible/build-ppdc-otp.yml b/ansible/build-ppdc-otp.yml new file mode 100644 index 000000000..a01a7c6ea --- /dev/null +++ b/ansible/build-ppdc-otp.yml @@ -0,0 +1,11 @@ +--- +- name: ppdc cicd pipeline + hosts: ppdc-otp + connection: local + gather_facts: no + + tasks: + - name: This is a build stage + include_role: + name: ppdc-otp + tasks_from: build \ No newline at end of file diff --git a/ansible/build.yml b/ansible/build.yml new file mode 100644 index 000000000..5cd1cde41 --- /dev/null +++ b/ansible/build.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: cicd + tasks_from: build \ No newline at end of file diff --git a/ansible/c3dc-build.yml b/ansible/c3dc-build.yml new file mode 100644 index 000000000..b6e305d04 
--- /dev/null +++ b/ansible/c3dc-build.yml @@ -0,0 +1,13 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: bento + tasks_from: build + vars_from: c3dc \ No newline at end of file diff --git a/ansible/c3dc-deploy.yml b/ansible/c3dc-deploy.yml new file mode 100644 index 000000000..6d5efd323 --- /dev/null +++ b/ansible/c3dc-deploy.yml @@ -0,0 +1,14 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento + tasks_from: deploy + vars_from: c3dc + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/ccdc-build-etl.yml b/ansible/ccdc-build-etl.yml new file mode 100644 index 000000000..852d2b006 --- /dev/null +++ b/ansible/ccdc-build-etl.yml @@ -0,0 +1,23 @@ +--- +- name: ccdc cicd etl pipeline + hosts: ccdc + connection: local + gather_facts: no + become: yes + + environment: + REACT_APP_BACKEND_API: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/service/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/service/{% endif %}" + REACT_APP_ENVIRONMENT: "{{tier}}" + RDB_HOST : "{{ rds_host }}" + RDB_PORT : "{{ rds_port }}" + RDB_USER : "{{ rds_user }}" + RDB_PASSWORD : "{{ rds_password }}" + ES_HOST : "{{ es_host }}" + ES_PORT : "{{ es_port }}" + DIGEST_FILE_FOLDER : "{{ digest_file_folder }}" + + tasks: + - name: This is a build stage + include_role: + name: ccdc-etl + tasks_from: build \ No newline at end of file diff --git a/ansible/ccdc-build.yml b/ansible/ccdc-build.yml new file mode 100644 index 000000000..cc58cd9c3 --- /dev/null +++ b/ansible/ccdc-build.yml @@ -0,0 +1,12 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: ccdc + tasks_from: build \ No newline 
at end of file diff --git a/ansible/ccdc-deploy.yml b/ansible/ccdc-deploy.yml new file mode 100644 index 000000000..11233773e --- /dev/null +++ b/ansible/ccdc-deploy.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: ccdc + tasks_from: deploy \ No newline at end of file diff --git a/ansible/cicd.yml b/ansible/cicd.yml new file mode 100644 index 000000000..4e184427b --- /dev/null +++ b/ansible/cicd.yml @@ -0,0 +1,11 @@ +--- +- name: build stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + tasks: + - name: This is a build stage + include_role: + name: cicd + tasks_from: build \ No newline at end of file diff --git a/ansible/community-neo4j.yml b/ansible/community-neo4j.yml new file mode 100644 index 000000000..39aa16f79 --- /dev/null +++ b/ansible/community-neo4j.yml @@ -0,0 +1,11 @@ +--- +- name: setup neo4j database + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - community-neo4j + diff --git a/ansible/config/ctn-env-vars.yaml b/ansible/config/ctn-env-vars.yaml new file mode 100644 index 000000000..f6ee40b7f --- /dev/null +++ b/ansible/config/ctn-env-vars.yaml @@ -0,0 +1,51 @@ +$ANSIBLE_VAULT;1.1;AES256 +33616434313331636337346236356463396234353539613032663137313466316331323438633635 +3831306639633833303739656462343336363836393761610a313430616464623937653961313936 +38613733356661653761366133333565316365333637633431333061616436356166653765306137 +3363346432313739620a393632613735663232626161396437303432376333393264353166303361 +34326538346432306631353166376230373538313066393330306139643437303932643932633035 +64643036336266643433333730353835356632313138623464353365383332626533303130623631 +35613766626361383635663062356532336436356564636634343666306433666633323262383463 +38633939383837343033336262303666346665343532323362353636353836636236323636353931 
+33643334336563656237656465333232636264623836333063323130383234613332343232663637 +37323433633433626365326462306662336236636238303336353832623964616664626635366137 +66623366613739306435356637653632663439346366336632616539386239663864343639363064 +64616639376337633261613765376534393639303637666235393863353964396630363533333162 +34333935656362613634663038613934383731616335326265643337346565336237633761613637 +61313163356136353464356131396537636537373065663238646633663062343633323962613938 +37323563323062373835633539613062306661613865353831393234656332336435373761616136 +38666161323562323232616339653933323663333931666164613036636439646337313932643531 +66643332616337613131383634363964373535326337353936383733666636623931623731316636 +38306162663130613338366334373633643831393736636437613632646663313861376330386162 +38343336393539363265373533353435393130376166623939353637396538346131613136383939 +64663632643438646166303339376164306338356438353337313063353633373631666464626163 +39306233323262636162313761353737633964613962316362373263643232613837356561326661 +30333065353739393431613838613337646337343138303539656437626664323638366632333339 +33353037666430353861386337643831306466313333363137333439346165306330326237313362 +62633832386164323461376137316632646363636165303534643739366163313434386434333663 +30353733373336356162333030356464313661353735313361626431313737636536303235616330 +31323662376636626539313735636237313062363635653332336135376164633933353361636331 +32326465626531393639646233333362643763623936343533633832646430373831363730396164 +31646532346463613330326565303564336461323662356536346633363334633536653766323264 +36366134613762333432623231636163343538323436646661623633386561656235656437363437 +36633661613662636166656432636563303265653331336461636164396633363731663734346437 +61346365323064366465373537613437366664323865333432633766366335303037653163633764 +39363563653266323433353962633133353939376639623463373564636561396364333539663432 
+61393334366362623433363030353862666137396130666665396564646134663662613737366631 +65386233613130396666306639646264313263393336623766656665343732626234393762383261 +35303837653165366335363933306232363732326364333465333961306164633838633165363434 +38353530326233353066353661383763376537653833663565323734333863623439333366363931 +61386265373336343466653766396531346262663864646437636635633234306531646230303530 +63636662356665663133313362353961613634306664383765656539313264393036653132663933 +31666232303061333738363261376565653830376639383664623764623630333062333431363466 +39353666643966336266376434303564343233326166613937343531313463383137613635303262 +66363236353233633838323533386562336235643561323238656461393033303435343237363665 +37313963633137333861343566366533346538666236376165363362373863356462326436633033 +66653461356565303239323261633131306562643335373936386132616236306334386162303561 +66623838376136333939313036386661643735373434323761383462633062646638643362383962 +38636163313936363065633737326137393139316335366361633733323637613634353038353331 +66306331306437366331373366616364666362346230613964373930303135613966343731323532 +32623366363639386631343764343339333630323634626666643261336536393661343630336166 +65303864613736386139323964386362646435353861326164306331323132636461363163383765 +37326438663633663238646665616238633036356430643237646235333165356263313332666136 +3266636636623465323839653337396365343761393931323766 diff --git a/ansible/config/hosts b/ansible/config/hosts new file mode 100644 index 000000000..f41dff48b --- /dev/null +++ b/ansible/config/hosts @@ -0,0 +1,26 @@ +$ANSIBLE_VAULT;1.1;AES256 +61316536663337633665303836353533626635613865646562623963636266613963366365316430 +3539373163653866643661336238333430346534656639300a366135656633656163366161623230 +30313038663731313863653065323234393338353136376663626536326236323463333232633637 +3162306165666534660a303463663039646561636632633034626164393937616431366437373232 
+64376234303237326437323565653561366166343038393338643765303164366465336538323962 +62613239343036393433656435323030353932626130626533303365353361363663376235366434 +30626331613162383564326261663438663230303234376266666130313437313363303633636434 +65326265343038396264303531336131656130363834643532353865613532383637343631636161 +33386234336639666461333435393638643436373538636336313537646264313462613965323632 +63386661383863353337343763383662393765336263643866336531653739363234356334396132 +64373139613631653366393031333931393961626334663737343239306162643065616334643431 +63643564316632613539616235356364326366363034633265663930303837633138633962303133 +32666366643533346663366466376636306361303235396538363563303265376636343739366664 +36623664643538386661643461653336303866666432303430633934336365646661336262636266 +31656564333461393934386636366638656130306162363436313533663062623965343933626636 +34663561353633393862613031313636633861373531346238396237353963613834626531666632 +33623664373266653236653634393135623334666132376534353136316232346537646462363666 +62323330373961376639316464383233643533323061313536643036613332363130396437353361 +62633438643331653332353439386334353862306137383466303538353564363962646563633565 +35663337383461353730306161366430613066636633393039323631653261333262636463663866 +63333963636564356435376265393137623438393836626164613664633634393431313335393634 +37313631383164636139363862643061383634326366613466333339323065363364386462616361 +38323065303961656265643935633239386133393338323865636634313439343737363664316138 +39306636633331346130326465663438306565383438626130613563353932336232636166343263 +39643162373066643861616635306138386331653334323130346334396638653236 diff --git a/ansible/config/icdc-devops.pem b/ansible/config/icdc-devops.pem new file mode 100644 index 000000000..79e2d2bfd --- /dev/null +++ b/ansible/config/icdc-devops.pem @@ -0,0 +1,97 @@ +$ANSIBLE_VAULT;1.1;AES256 
+66666436333133363563336561303764326432663630643161346431366361656632303039626138 +3931303230666134613162336461666437643862323862610a393966616531346536643132373735 +64356238646661366339343738653038383030373535353532336134653339323436363335343561 +6636653766623839350a613136633261326633613635373635303837336361336134346539353564 +66303034303565326264616138663665333361633262303934653733623666613339336333323032 +61393262633838616436636565643066373936306466353034313839306439383665623764393237 +38396631323939626364626230616261663439393039333661393838613661306331656138643364 +31383038636466623134356136373562383835353831643731393962373030363137623735616535 +32373262353032623930373565313038643437666162363565363766663862376139636234353732 +35646437343063316336303833383634613537663138616134636336363563303035393636623366 +65343666303562633566386364333865643137663261373739373835333932323561366362323330 +36383130643436336631366334376535636463363037643966323839653363313666343261393132 +65613938386637316437623639393961363562653433653439663261363765663433396633386635 +65316662353935343039353730333361336230323132383930623563633361633165346564396162 +32623337343463336336306239366636373730313961633436393937626531303834353032303538 +33633363376432626136653534376238303662313465633933623463373633383265353139303036 +35336661653036656361376561663366393862316237383630666363306239326366613033316164 +61326539376436353562643638333338633834396538303964373335336130316431363233323839 +33356434653433313834633431616665623736623833643934633933363863306134623063653436 +66306133363832663330646462346563343335613034326566666431393162373334343639333830 +66376234353633666138353862626433353863633061653864353139336463623264656561313935 +61636538386438643233613334396431316566333635383832333130336361656436653238343464 +34633334613866373933643066323834383263393933636165633434636435373766393364646363 +64636537343063316363393137346632623165326161303036373536636534313437373463616466 
+66356464636566383965636131643336303863626262346334323034643030333138623263323232 +30316137646330636465373464326366373736303362313266376666383366666432323731366532 +61356332316530613938393364313730333338653866336637396564383165623534336535633764 +30616634366464396665653761616132656161663830613330393938313530613738353263656539 +62646633343364613136396135353333313730373630643433613964643261653365613933346433 +36323964643132623466323365373233353466633139643962366137373066343165313561343263 +66633234376634343861626361643830393430643234326264333932646635613565343333613637 +37643065326131393939356233653335623563313561376634643633303666353038623462626637 +64326533396630623965333832333331393361353138326165316330656131393563323863366336 +35613661383065336137313566313365623734326637313530303062656239313061633166376336 +32303364396639366230643537623233663664643436653537316633323864353231353139383937 +37663766396630313732323062396161316236356537316632316461363166626262393462353930 +63323635333636393634613963383732356530636130623461383634306565356330313634356137 +66303036643239346231333239376532313739346461666466626639633336653264343463333164 +38356564396362623137323835343239353037616533316665393037633766323830656530653236 +65373464653639363133343431393261316266353935656639353231613437613832363136663239 +64623031393262653665356235393731393133643732306338376561646566313763373239656366 +38616461313133343637353330303038313363666366353264633938396231616230393664336332 +36653165323032636236386534333264386166633837633030303232383232386165333338323438 +39303631313461643763343537616639363431396661336233303430636461663936313739653239 +62336361623437323534393632346464623262363438633761313437376532313662363032336132 +63356238636237356334613532353639653538303433643331623033356230623163646134313337 +31626565366465363934393332353739663831393463666131336663666635316332376132306665 +37336464626232303736313965353530383838343661666533356165633138346332376566663861 
+33653737633264646537323634343966396236386162623265626639313636303938626339366564 +63386236383233363237623566353461646433383635313830653735313464323262323335633635 +34383539623138366462393230326665626532363861653764393165643730343433643062333563 +33303362623933396539643761313263333063343363613832313239336333626532666665313039 +38613966373261616332666636646638333333346434656438623633303732336631363963653965 +34626165623038353761323033356431616331363139393435656162383864373533663762323562 +37643432653866666461343461383032363861393664346465393164383834303532643433353634 +30663533666364306337636439653231613262646231336464303438356632653430333265376333 +65393164616434306438623339666430633633343635393665333836626337613565323861386237 +62663561376635643564396466363439666432386433333538636661393765323333366231323265 +65373232376230663830623337303930366530336531633431373939643364376162343939316431 +63646563323130383762386332663766376336313834316436656265373632353139643430373738 +65363036363339346363303538613939656332333565666563323166323737353533616233336365 +63306630623334376132366662333338343838656335366663316266313766663362363633303764 +31366666663762626237336330333734393132623434656262363132623831303966663230326532 +34626636353331373630336464323936663337383265306566333232653866346137303132616564 +65346633393964326361346565333734656565306331636362313134346137393632613761373639 +33396162353537653565623761636135393332626334303336373862386261333135343962613236 +61643264343336633232633630313436616138656133303738656432613230343930626530363838 +63366464653864646132626139396339666435623334623236396432613138353465383732346663 +38316465356463653536623030336333613335346134626163323166363764613539363030376166 +65383636393963313632636438343063323862663534646262396634323164363761653066623263 +65323237306133656331333633333732303137636161656438633263613332336536646132633937 +37643034666465313433316638623762636465663037323334643032326639366264373066643234 
+35353761353334376434383931346538363661356336653464373764643665313931333561373263 +39383961646664336433356163343163343135323466636366613566643162663366623962326133 +39343964613433646162353463306165303231626339316364656136393761663036393134616230 +39663236346266643736616135323634343762643238373234303634623332326566343432323866 +38333637633661636138633732366534613664353934303633316530613165353230366465313062 +30373932373432313562373834643561376563663530333239353932393366323738396464396665 +34343539313433376462346334383135363165646266396664303037646338396633383935393439 +61613630376266386532393230653163636633306565303634323038346563616134346439386666 +66343131633530363736643836306130653837323130393533653364613139633765626564663537 +65316631653136323266336534643865636531343636336637633462366230326437613739636264 +62353230306231656366326232356238633730373266396466373430346432623661316338663536 +61623736616630376138663661346663656164353135366466333935623538623865316361616537 +65353565323636343831333832666136616561336437376261653565356532313937646161313532 +66383032616163343764636662633762376132616332613063353766343933396435366362316164 +62343864613430363935343532343562313733336236663538333735386366656137653935613561 +61333065643161653066383562303131376430656339313164326165393561363838333432323034 +34356263623561643936336566356533376133313830356232336665383131633333303538663830 +63346535353433633231376432656166613030646538383937303534323363653233343532356539 +37393635373765336361363865323566383531373963616335306139616666636463613433653538 +38313563343033313433393463356136616461356137316135643531313032393934623933646263 +30656234653637336337356531303834376439353635663435633165366136326232663064653966 +62336139303530383236653030326635353633666363313830326336343431393133633131333731 +61316564356161373638316264313964633066643638393364613030366363353533363730393539 +37656539323866313239 diff --git a/ansible/config/icdc-env-vars.yaml b/ansible/config/icdc-env-vars.yaml new file 
mode 100644 index 000000000..984d358db --- /dev/null +++ b/ansible/config/icdc-env-vars.yaml @@ -0,0 +1,58 @@ +$ANSIBLE_VAULT;1.1;AES256 +64643730366139653762636137386264343936363532393336613030313136356438633638356333 +3639613839376234616563366634336361373262306534350a333064616233623162626633343461 +38346563306164383465613036393865363262376466313436336665323364313337643238333335 +3837303030643336630a636566646333376562613235373737313161666439356462363638366165 +66613237326631306336616265306332623830336136316261646330633833613638653332353136 +34393365396465633038646538643638383462646231616131373439303961656335363564363566 +33663663623061313634343766643466396231643530326630393362623334366363386631353531 +64643530613335346163636234636133333938363537626662393238376235353566656161303134 +30616461363835333231623061326563313036633163303864363763643737653666646436383433 +37626463306436386533383433326232343464393962353339363364376662633033656536326266 +37663639336530623035363738653834326636626663653639663732393231343931343439626466 +63363261623034376531396431313333316236396263323031373765333035663361623036373430 +39316336353361316563633062663031396632626362333362623333393132613734346239316265 +30306133613363653336356331643236356663336462383766613066653937313830646161333638 +33383262616566646635343266326532376265386563333730336239383437396139613432653965 +38343364333961313962646630353432623439623862383363623366613431363633336133333336 +31383366393366653464343663313631666436363139386634376365346435306530326663643532 +64306136386433636164363165393030306439343462343538613332643539373830386332626662 +66613561663530306538623934366333353635613464343832616633313330373863333262366665 +65353464636463656638366365623639323837343164343539646461313133333239386263623533 +36343839323231316131363962623865613339393132366331626334663939313938623864323162 +30616437613438383861613039316535616262336531613364306331653837336466326632383332 
+63633838396666623530663130616533356463376165363635343533663636346133336630336266 +61376564393635373965666261393363373838613836366364633436653035376433346333376235 +61613830323232396636646565356538333862313834656662616231316134353035363162626362 +38326530633562353563633634303464643366363639353466326639313034653534663566643530 +63353165303539333932343661383564313866613363373837656238356566373830393761313665 +61636164373631303061376639373964396363333131633233643263666664336338633761326466 +66323866653962616536656135653634636431306431303165616630376661353635353039613937 +35343633663133373732313838366336653163386132656266623138343133373034393630616365 +66623131383663643939633964656137626464366166363438663335656432356364353532353236 +64666365353265613462613635656566353962333664343032363430633562646662636330633831 +39616433626261393163343434313637313339373962653634366663663131363830613936363338 +39353665666264616137363431306463636430653039613439303861656539336563376165303635 +63353666343934326139646631386338653132626233613035393332663838396465616566323861 +66653839366535663730393030303231383866343664376434316463396131656564663037393232 +37383233623636353235613631393530663837313362323466323233383762353330376530366337 +33653361356365653137636437383638313338333632303630323632383636323964326363653531 +33373834643561363330653933663837313662306531633232373630363338356665626639636538 +65663631333762346633646232373837643235306436373565333636346562636361363234316566 +39303966666338333133613239636566323064656133383466313861643561353165333230373136 +63303831303163383964646631653630373038386537323166663838346166366437613038663638 +64343062636435363839616362633833333966333931373935373537383837333837373434303833 +33323630323233356134306131613035353934343863346430653364333839383463386437656465 +31383831616163616566396335366433666533306663653535323663363862383663656661383361 +61366634343139633137653434323662353931323461356165366237353432353335323661393637 
+37666333613432386461346466343839636663343262663339616666623138666639363338623838 +63313038613261326335373637313038396266326465666532643336333566636231643232366233 +36643466616437393735636139383037313032363965643664393665623334663632633939666437 +30316262663936373632663463643063323864656238663963313336396534383233636364396466 +63373563303136616335653233373437316336306262623965363730366361346539623862306131 +34636165376263623733633230326233316237643936303061313737393664363831386135366465 +34626630313436313735353066616630656561336636616461633161636639343837303564326665 +66616663663933636536613830633037343330363761613038643131663537626463366561656632 +63313830353163393539353066643339386432353334333230386132326637353936363462306539 +65643537366135346464306232633864653766386630633532653166633837376366393131363236 +3333 diff --git a/ansible/config/icdc-jenkins.yaml b/ansible/config/icdc-jenkins.yaml new file mode 100644 index 000000000..f21c4b1b3 --- /dev/null +++ b/ansible/config/icdc-jenkins.yaml @@ -0,0 +1,245 @@ +$ANSIBLE_VAULT;1.1;AES256 +38343739386436336363323335383166623461363435613736316534663936623362366537366562 +3439633061393938633465356433393338656434316463390a646638613435323535616535343732 +65373837636438376436333362613132656663303630363766303839656330663531333438663761 +3033656164623963630a316564323363663932663632356439383035303163656165333235343235 +66316562373631323734626134303166353163383532633131393639366239646261356662363566 +33663563623634653833633035386662356139343661306531333362326666613239303766336334 +65373361643538656165313231636234353436646539656331323363663730333837623036613736 +36613433663865336336333062356334616338383631336261396239313137663738613862306364 +33363133633563383330333335366365306135626239643139326235373831663831333932313338 +36643661653865393137633663613039326136636237343933616630633664626237346131373933 +65636165313434343065336538383335303633323238313635313463633838626637633735393266 
+61366164356434616632363366656165333566636332323163366464353066313064353632373364 +63353564663735663334653266653232386364666464343730353661363033653764303832386337 +36326639313762623763326434303461303763653139393734656364653234363638393438663139 +39333039386331383238663632623263613833663632643133663138643433323032396463376365 +31333534323432363139626163646162373938656438613439353831656333386537353034623863 +61306633396333643531306538633465376166363734616439333937323337303834646361623738 +63323736656463313535396433653737313762663135313734663063366234346234333266396633 +34636635663461643163306263623563646163396564613636313264626437633930303263623236 +36323430306466626161326562643732343933643562353662636266613439643934623863616265 +64393731383935616432626365646638643432366462353935666165373565306339393065653933 +32363966393061373035303964363764363736373130313734646130316533663637386530373661 +64396661336637613262363337616665343332643061353936653238383831326138376330313162 +62633265623163346363613332613261333639636330663037346364323366313361316561613663 +31616637623038626330656239663766353833646365353230333064653938306464623331353537 +63353262306635613337663330613966646465613530383261653639656230616462316161333862 +64373438626464636366643533666431383365303730346338353337666235303634373739366635 +34613232656536643339366439336334323639613063623039363333653561353433356336393435 +64653764363461633733373461323163376139616635343665363635346163333135333131383039 +32356436626365313837336661313539343439316266363266376235343937643366663332346130 +34636130373661306331616335393966373935643162303933656663386634356663326436353131 +30633231383537646137346435383462343336326539653363353864636464336664323237643363 +61363831613539373461633430326464663231643231613664613732343564356532376634313731 +64663361363439386361303536343533666331353662313136353734343231323665643738633065 +39376461346466393666663136666133653663303331383639333161663830306366316434346165 
+63613963363935323936393762393363326438396337623635616235653630393931363732663864 +30366132353133326134353038373161636161393331373139323630663263386137353766363335 +62343830636561616465626365343436363064353135376236633966363763623765613765643265 +64363633303630653334616533303337663561623463336232613163666366336330346364303062 +38386330613831396561653231343434326530663164666334386261306234386366313134336665 +62616166663464643832343136353266636332386431666531306566656330326265613966363961 +30626463373537303731383036643930323834653836663737303939363832633637616562623339 +39333561303666646663333535613863303038346638623963646233353164636631636466613563 +33353264376636623330613964346131653962376265636365643534393766633036663834323964 +62616166313831363164643061633239323136666135643436356164313435343135323838396232 +36313234393338653261376165643861366634643661363063306636643831326436623030393534 +39616334626564336265663265643138323764373965616139633762613330323730653338316334 +39303631396162623230383731636562323663353133333361316336373930623439333032366635 +39373465623861323432636139343761666130613934623732366336626435663432333566303732 +64386135383037313037616562633338643337383239373361306531623363643935336336373866 +30623164666666306137383834376364346637356438613431333832356336666464333364653639 +38396137613161386535306439653763313439653265303835303139333433346564356164616638 +63313464393339316631666135353161306663373330386663363634643830373839313432306163 +39626266383633376261646264333937313933363637616538366161653534646433363064383035 +66643834393937616237663566313634316661646564326439626536633832363037303235613636 +63653638326339313631326139643964326530653330343638303263356333373264366434623830 +32626334306537333265383330666336323635343838656534303465376232306639326534666338 +64393762663933613133653436643036346430346264616334663664346262663234623164353935 +66623065336133333961343837366364653831373932326265326631326635656335373963306666 
+37356365306133666336636162663230646237653939336230376661326136646239656535633764 +65396266623533343239356662356535303130623266356363383436356662613332333335303031 +37653333336361656232313635323634353932336138666630616533353864346262636666343963 +33323261383166316465393132653764353761333237383833653766656431353366393835323063 +34353838303664323032393161386234663238326437373031323934383831663037393431383832 +32326339343935353766353066613266623065646130646532333966663035336366363066373637 +34626330353661653064323766323532663331623333336331383161656536376232613764613163 +37663739346134366135373365303932313939353261383462373961626535633137306463383333 +64643337333032353131313036396135313830613835303237386133376338636632333862643630 +33326234616663613035363633353562656565383662323461663465366139393233643730333832 +33633136636339623362366562663730383663643332356331323235326237303066613239656362 +63363233353065663038313461356638313638353032626333326232613833356461653630373966 +39383135623962643235663063653130333336363032643638666238646634643539666636343330 +35306531313262663439656266336631353236323037303532656162336533646538353135306166 +35353939376362326333623136616137316136323539316632633332653535313739343933306633 +66353936393838343962363933356631373362323238313065373932376661346136336364383231 +33336630636265333861653065623237653332623732303732336562386362393861316561363436 +36633430633733373337306435613365643631616165316132393735303065396262386333616265 +39613036303266373031646233623831386431613064656463313264363037656533613039663538 +36383331396462383164666137623835393164363031393162656638663633323466383565393662 +66626462653035306263653631363831623037653035633337643433623931623530306433383632 +30636435326632346264373834613334626630326361363939643864653564643862656135623039 +64633234653933356236636532336363316333366232306135666438376536613238323137656237 +65353063356662633739333432323665653335343964666437653436333935663937303039633466 
+62393236616632373530396164323264616534663765376638303464353733633233336338333961 +31653964613265373862303665326563333464353930393565326430356464333438323733356234 +31626534323934633266626566363733316432643738336363393863346461376666396431313361 +62353464316435353966646139656535373061656539356262326263326634326335303766363266 +37346563653766333935646435323532613061393865333766643138303261323566656563363239 +61353830343831373234613039323866333462326131333264333064333661613461343938343034 +38653635623265643561336537656566396634663462383135353037373766313963383962623536 +32646664666633383337386638356138626634346462353534656565656335653436343664643732 +38636633326332313833306638396637386431396133646464343834333030313130626264613437 +32633433646532353838393366326639316661313361366530646331343063613363316139373862 +65363163623030636230663038633665313661343632323036396363653563636431616530363233 +39336234643364323631653466616662633138303531373135653063383735646334623632666138 +38383439633861623261653735663364633339626638346164353838383264663634336466353261 +35383739373433626561316135396665663033663836373765366132323634323265313230303461 +31653963643161613961386164616635333836633534313964656161633637316262653566306433 +62353762346237656438313737333261333061653238643835333031373061356564336334653238 +37393431336334313566616266316535373231313836636531393335626432643330613262316561 +66383462396537646137653365383432666439613933373763376137383262353738626538333163 +65613030383132626135373165346266653764323236363232393430356632326238363731376330 +30633830366662373464366236323939616163393138643830316435613136393939613936333037 +35613864633661323963373530303730643466656466376366383137316634653536303330373638 +33646263366634633033613037303961333631666265366535306338646337646639306466613234 +63353632613330316636323363656664376336316136383332303163396136303566333864623764 +65366234656463623430313135643531386538303539633234383535343664306337323362303534 
+32376361613966313830366165613763653064326563646431626137623539333733396431333862 +63346139623063303763656364373733393864643539646337656231613634366236343036656562 +32386239316162323366323131386234313732343063326331343735343632383763353562373461 +37643134343463373534613263326363323538336232363466363866353133656166336162393964 +61303632356234373931393036653237373862666661353131353036366333663563613832626530 +63333632383633303438636562303639343531653762636264656136333032303931373762333463 +33313636376331393734313065393130303633633362666666356566326634623462343464313738 +66616262396431396531643635663265306434656534373330356131313531653061666262343734 +35376131386334646139303065313336366365323133363265336136346662613863356638353366 +30353631343862366365623766646238616233383361343230373066316661623566323833346565 +36613162353165353633376334626134343165373233326462613235366337336266343265623964 +35363932636665303837636533636463393237386538353334656335373633346337383838333638 +31356365633962393366313238646465306134333835363161653134316565663231356437303463 +39633434623466303335393736396662343538346237376531663935316166616165363162386238 +61333061363038616161623439343933633331643233666262356531396163613966303033653834 +31663262653165393363613365363532663265646436396437326538623431626438356432393035 +62326239616434303735303935613236376137353938323435626233323935373530616434363166 +38386537633736383439366463663863333361623066366637623563643766613239396263613338 +65356563376563613634656631353036373837616564636535313131383737336261356661393634 +34616639643033626663666632386364643662616238646262663436613433623034373237646562 +33326639376530373563356437353130383732363232373432623264346162366466386138653466 +36323534663932613064303638373832383664633030333664653066646335333361623563343861 +30653461663830366531306538393936373964383066623936636461356166313066633564643130 +37346131343535633236396334653461383335353131646331373066323163343237336636316366 
+32303264613130623734323861636561376162363061313835346430633264616238386536326464 +66356665343130643966333831366632656130633137666533663962376261353539653666376164 +37663765343964313163326663663631623465663731323237343537326332653933366232373934 +34646239323661343530383031303565393563376139343235356333376461336132356364336238 +30323365326161376336326437333635353839383366623936303931346664343763343232633265 +32646338623335663938663438373730353633633964373230316534633130396433353131653166 +62643463663836323638333166396562343835313138303739386263366333356566663635313564 +39353161393736663435656261393362633531393561303838376265613362316666303465653566 +32303061633635383130373335356635333338313230316434666461616461353930346263626664 +38356630613934383730626132663636336230376633336664326236333165613062626265343761 +32313061623764613631636630336462313365623761616364626332653965653437666535363236 +36373062393736646133393632396537336666343264383032626166643033613538353339613864 +61316433643064343763393961633266323931366164636535653061663930373435326631623538 +35376538313631633531633964356463303532333139623462633836383164663861643163356132 +62356439616632346463323461363432326238613965326665626434363838623464386234363662 +31306331626636623765373035373236663931373966396235383633336538303964313334653762 +64646466616261306637643637383435666534653330326332346465623235346133626565633561 +65353764326238316664356233343439646235383164383261613865323664303162613234313564 +30633634303333393834623037653866613932333235646536343066373339663639633533396664 +32653636333638383036313865346336306534343431313763636538373231666230373532623134 +33393264313161376433303866613764323964663338303662623631356438663433643962326338 +37326534323335343765396465663934363737623332363461366435323936643433396531396363 +36336433393366653732333830386632313634313261343030626263383563323162616330666336 +30323261323239393030313234333338353166323437353630363039626435623765326262633735 
+66363066313464363336643163613466383739383262646461333635653138373735636237376261 +30303366363039616232326437373439336132636431356365366562356465393735373633343765 +64303439653330393832623061383065376338306439353735323632323036656164323832303837 +31623634323337326531616637336164333735623436363235343138336538343535643663366664 +64313439303866616664316364323831643461613539313734313835646631653665633437363263 +66623539306538663138626238313961393135363538623162376234326364643565633030646462 +38353537343865653932376264346634353564383465343263306435396565333761353661363934 +62326631633735376662366264613231333337353533303438323838666433323465643132653731 +31646339383066383065316131386633353430356364653834666439333039313130663832636433 +30653466343566663732363566373333633936623436633762663461346237386331383135373339 +33326538323633353164323433323033376334616233623834336336363230383132656536663065 +31333631343264623435333438636433663865386535366366666239306436643734663764396264 +33653165626636353732643366386464613361383161653530626437326633356636396232643066 +31333864356634393831366431343838303234316537343236656537303531376461336666656631 +32666362626161346561356462313434623163343064626230616435303066383838376439376233 +32393030376637346334303039333364346639616431666533303435386234303163616233613365 +38656636373133353131643239626463383536373964303738363164306433316336633534393739 +35326633393261313061393835353330313664306238313033333965303839666636323364636537 +34393333376435373239663134623536663733306661613432343363353663653863633735356336 +37653034396134303433643239343938376332633636376662663034333238323235666134333066 +38323039343238633731353062303135666636346239363735656139323336386665323264636533 +34313334346337343132316361356664373032653465346534313233636533336231646336376166 +39633031383633393261613766316539363237383065643761363831396465313063343331316163 +31623034363238333438393962333465346239356561323039383437363863646632393933356336 
+39646637343465666262336538613135656136666133393932663965613734646364393930663438 +63313166376464366263376665383835636464643938336234306164656531303536383437613632 +35643734386134636538626363333333616264393734336231383235363134653430623434626638 +63646433633761303362623236383038363465383133393433633265343738643132613437393930 +39383135323434666666643833333131393866626137393232613436383830626338366233393333 +34626237616133303665653339646365313666633566343363316136323965663939363334626166 +37643334326162346138326562376161616263613831636261613230646336653330346563393834 +34346134376162626530326539303964653432356332613336313964663365303431393362353930 +34353437393433616438316366386231656434626332353334363734643330316230343733613539 +61333865386639393837343266323333316638393935366133336565313265633262346635393538 +64353337633834646634353161336563616539653836343039666161303938316233613939343061 +32353762333161623464316632336531643932373864666565306462663530636664663836336265 +63623063313130656663336565666531396561623831626134396236636132373231346465323438 +63303665393164316637323365376465656139643461646666323732383330366632623836633532 +33633136616664346262393037646238383666356131303038306261613631653234326266343366 +31326133383338633666356132373435353464656537346162373737306437343738366139656438 +33383637366230336462303933366236653339306263336437396361653430666235343938636337 +63626165366463653632346435643339623161373338613435653236646661386130366262336665 +32613364646639396638346461306539313637363637373635633833663135623630623436353230 +30653866313532303132363333306634396332323761666466343362633761343634326461393138 +33366564653435613330373965353930613165336363313438343034323630353762633964646631 +63303064346230623263313565383032373233316337623735626430313530323263393736623466 +66376132643336623032306239623839393864323230626338633163343262663662616565353336 +32376232306637623637376462643830353230623631363937396464393530396633613139393631 
+35363766653938656234653733363531313061643931323336313662313466613464626533653436 +33363065663836336363396663303930623631383439663062616332386138343937633631386462 +38626333343239333937653930313466383764313838343038656265646666336637393434323062 +38653065663232313638353430636364386236373765613665663234346137353766313832663131 +64353166303230646363353735393863313564353936383865303261613635643533393465376535 +33666566376131363234306366376232323664353637623034386439666638643438623062646262 +36396165343235653236333633376161353931613239303165633537613337356466336135336166 +35316337656636613833353831393139626433353362333139383730656236666364616266656534 +33316530653337646337346662363063646430373765326434383437623335653962303963356566 +31656232303766366136353863623735323961343961666439383137633062653537633565613530 +34633131653965373836623736313263396134346537613863396339613765376365393836656263 +64383465663265633033363537313838343437326366373835303531663635643434303335303732 +39393137633764646631343633306264393932646630376161303432653362303433323935336637 +61376132653161616365396664613163343366343036313030383264643239363732343131353762 +63316234356131396666623264646164643836366537396638626336393734396335303030393163 +65353736633339303563643661306437393931616161393562666230646262643930303466313034 +31336232316263356537333238656339383832633632353863643730633736623635303434633965 +33386630376334323536336337666134383038316165663432633561333562653830616263343430 +38656261633133636161393237363932303537643062303161613438633430333933643232336537 +35356666303265363561643163643538643639653065376663353438353334373762353466316165 +64313734313661396530643663663265656561383839633531633961663261656232376465663534 +62353530336238613432373738303562616431346232383830306337386663336162383562303366 +63376335306635623630343539663938616463353230653662393661383137306234623830306662 +63393930333634663738323934316564626261313332336234353065306437623439326134666535 
+61303439323035643431646233353961366466393731366531353636316635663437383265383763 +34396132306466303636323234613634613134346236393837303962316261663534613061626666 +37656332656533383034303031626539323530626133623166376364396436616330633264636631 +32613264333931343465633132393166333636383165396232323365663864636261616131323633 +36643336316434376437393639343739333739666162346364383733353764643734396239313631 +62326235336362363834353666363735343061343730613662646133653832323235653262316633 +32386562343639316535336438616363316636353730343663353065313261326437653733626163 +31356663653838346138656266363937333562313236396465336531366562656631336335356331 +65383336643034353763333038386664636362306461306361643665323565373031383231636566 +37356637663431663230373033636636323065356562316334353764643531646430313866373064 +39663933656162303134633662386561643936336265656235623735316537373234333361363865 +63306464366163306362326361353562623338653634343236373462633332613638393936656463 +31313532336538346265393736353063633833313036343364653238383630346133313838353263 +65346665396365313239646364316131373665663861366633363365616163353764633534366638 +38313538313361303663316533616666623038353264383238383637346233316364666133353836 +31626463356532303463623535393362623837316434393734613666343437376339376136313234 +34363835336330356166 diff --git a/ansible/config/icdc_demo-env-vars.yaml b/ansible/config/icdc_demo-env-vars.yaml new file mode 100644 index 000000000..b381717b8 --- /dev/null +++ b/ansible/config/icdc_demo-env-vars.yaml @@ -0,0 +1,51 @@ +$ANSIBLE_VAULT;1.1;AES256 +34643336363535313838663833343664643032356561393839393265333537613735356539666333 +3830313939633138363333323466393336626463353762320a623061636235313461666631346163 +61653334303039643631343765656333626362336162666537646562343530613933326635396231 +6337363135303065310a616432636339356165393336626232333035396532316431363065656566 +38306437633566653739343765343630323664656562393662363566643462306235316134333963 
+37633531643336393530323131333463303966353665646138326463346436336430653034303732 +62666163356465323736316563623466353532373662376662666261613565396639623139343535 +34653662366238373231333534646137626661376230663038306266656636666161313864633264 +63323566663130663735633636356430336264346331333636363563333137393966363039656666 +33623262623462616436653736386232343838396132383239616663323339363433376339393133 +39336261336163303562353130393366646562636137383734383665396239653137363536333435 +32376561323037623937333264343763616636366162623033613134376566396437623634356563 +61306262306639623561613562343865383632303934356533363234313366373430656165666230 +64346331646636623137356639343235613864616366313533353263323366383637383764316333 +36383765663036666237376666666561393730393664366562626432633331633232396336393264 +37623763613839396166396434333166396131316265316334613365313437343833613862626435 +62353063633833633962363334636666663661646639306566383139653035666161393737386466 +63653139363530353335376131663666373264653965656238326332623839653262616531333538 +31303336623564306664346466383633353335363833336537376466356631383939373631313361 +36343566376233306236353561333632333536653239333431336339373866393531613937653164 +38303932363334663666643733666131393335393863636133643766363634653939396234633833 +31646338373631386232613739313062653164326132636561636564396432366262326439393261 +62663031643439373934666330663332306137313262663835336231666438373161373961373936 +66353833363138336139313632663666626338383636346462323432383562343363393539613663 +66386432633366313639333963333565343363613936333165366636383033663438323763356433 +65306266646662333664323136386537616364386465636636666531643662653865366637356362 +66323830623037313736623161653166333862626133653536623735366464313461396531363731 +64346164643662363661613631393266386531653636636439616364383462316533336263383635 +66363630303337663366366439666538333338303231383031383233353737353636396630666331 
+64666664653430333835386666623734636530633666643065363233623037666430343461653932 +65393438613934353166343365336132333338626233396563393437376135666135376362336266 +39646238323464356433663937633238383463343165666234303064326636653832653466376365 +34313239333461376237333332343334613761653032393435613233656566626465636234373934 +63383430396339353562633966613534333236356130376235616131613262366630323664383534 +35386161343937323531323864366632353962326465323765636137643531613861613066376466 +64636461366139666165336439336465316532636662376266363861303933373231323830376631 +34346432333334323733663636323038356366376464333534306339663632613633643531316333 +32623330626365316565376166613432323866653164356335636337663233313064323066663933 +61303864393462613465376131323236373231316430653165646135333931623464636332663937 +37663166656366393632653664383964326530633639633133353061316236646165633165613431 +64613734376666353636626165633439623830396265393564613932636264663734666530393663 +63323535396438346361373765393163653135656138376637653061656564326463326663646363 +62636664393939663833653038383463333265633039326566626363396230633166303565303836 +30666461323232366633333736366134306564383262323439326361633334376166373032363839 +64616635353530643531313737313563363262623265336435643639613230663532663766326663 +37393031393339663564373638656266626566663231656537306463353363333565363332666139 +63643561366263613436653939646362613230663036666361336439376632626165316635656535 +32313633353436643064646331313866383162373035343539353663363261366231653333623133 +63636136336132663034646639616661386139313066613862316534393531333233353034373361 +3862313064623139656232656661343230383736313663613231 diff --git a/ansible/config/migrations.yml b/ansible/config/migrations.yml new file mode 100644 index 000000000..774a1b7f8 --- /dev/null +++ b/ansible/config/migrations.yml @@ -0,0 +1,11 @@ +$ANSIBLE_VAULT;1.1;AES256 +36643165386666336332346265386636333030316165386663616235323133383136336532303539 
+6161343136353336656564373439393864663036323130630a356664346663303132653462336135 +33306133633239313261323334653131363761643064306365343532643439356233633630393037 +3636336234633762650a623938306465353562613236663163643530303434356535306361646134 +63346661326235393938393135623035366235656433356336326439313333306466383661343632 +37633531663432393738646666313535306639653664383864393535386436336535326266323563 +31666139363962643834333832316231333765633936353435646336376661646531633731366131 +66316133616536386632366365373139346536303665323764373665373235643133393530643066 +34666233346431336630653064306461633034306238303439326231626339653463356262656664 +3636303137616161343231316165303833333935363464663736 diff --git a/ansible/ctdc-data-loader.yml b/ansible/ctdc-data-loader.yml new file mode 100644 index 000000000..ac5ca258a --- /dev/null +++ b/ansible/ctdc-data-loader.yml @@ -0,0 +1,16 @@ +--- +- name: load data to neo4j db + hosts: loader + connection: local + gather_facts: yes + become: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: ctdc-data-loader + vars: + # data_bucket: nci-cbiit-ctdc-{{tier}} + data_bucket: nci-cbiit-ctdc-dev + ansible_python_interpreter: /usr/bin/python2.7 \ No newline at end of file diff --git a/ansible/ctdc-file-loader.yml b/ansible/ctdc-file-loader.yml new file mode 100644 index 000000000..51c87abe3 --- /dev/null +++ b/ansible/ctdc-file-loader.yml @@ -0,0 +1,12 @@ +--- +- name: load match file + hosts: loader + connection: local + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: ctdc-file-loader + \ No newline at end of file diff --git a/ansible/ctdc-file-validator.yml b/ansible/ctdc-file-validator.yml new file mode 100644 index 000000000..4e32c88e6 --- /dev/null +++ b/ansible/ctdc-file-validator.yml @@ -0,0 +1,12 @@ +--- +- name: validate metadata for data file + hosts: loader + connection: local + gather_facts: yes + + 
tasks: + - name: perform data validation + include_role: + name: data-processing + tasks_from: ctdc-file-validator + \ No newline at end of file diff --git a/ansible/ctdc-git-tag.yml b/ansible/ctdc-git-tag.yml new file mode 100644 index 000000000..127e29fd9 --- /dev/null +++ b/ansible/ctdc-git-tag.yml @@ -0,0 +1,22 @@ +- name: Tag ctdc repository daily + + hosts: localhost + connection: local + + vars: + retention: 30 + date_regex: '([12]\d{3}_(0[1-9]|1[0-2])_(0[1-9]|[12]\d|3[01])_(0[1-9]|1[0-2])_([0-5]\d))' + current_date: "{{ lookup('pipe','date +%Y_%m_%d_%H_%M') }}" + git_tag: "{{current_date}}" + git_home: "{{ ansible_env.HOME }}/git-tag" + + vars_files: + - config/icdc-env-vars.yaml + + tasks: + - name: tag repository + include_role: + name: git-tag + vars: + git_url: https://{{git_username}}:{{git_password}}@github.com/CBIIT/ctdc-codebase + diff --git a/ansible/ctdc.yml b/ansible/ctdc.yml new file mode 100644 index 000000000..12bf3a703 --- /dev/null +++ b/ansible/ctdc.yml @@ -0,0 +1,14 @@ +--- +- name: setup ctn server + hosts: ctdc + become: yes + + vars_files: + - config/ctn-env-vars.yaml + + roles: + - common + # - { role: docker,tags: ['sandbox'] } + - ctdc + # - sumologic + # - newrelic \ No newline at end of file diff --git a/ansible/data-loader.yml b/ansible/data-loader.yml new file mode 100644 index 000000000..4dc6f4cd1 --- /dev/null +++ b/ansible/data-loader.yml @@ -0,0 +1,13 @@ +--- +- name: load data to neo4j db + hosts: loader + connection: local + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: data-loader + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/deploy-backend.yml b/ansible/deploy-backend.yml new file mode 100644 index 000000000..143f26d56 --- /dev/null +++ b/ansible/deploy-backend.yml @@ -0,0 +1,13 @@ +--- +- name: deploy stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + tasks: + - name: deploy 
stage + include_role: + name: cicd + tasks_from: deploy-backend + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/deploy-bento-ccdi.yml b/ansible/deploy-bento-ccdi.yml new file mode 100644 index 000000000..6e7f1cf80 --- /dev/null +++ b/ansible/deploy-bento-ccdi.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: ccdi + gather_facts: yes + become: yes + + tasks: + - name: deploy bento-ccdi + include_role: + name: bento-ccdi + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-bento-ctdc.yml b/ansible/deploy-bento-ctdc.yml new file mode 100644 index 000000000..aff429958 --- /dev/null +++ b/ansible/deploy-bento-ctdc.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: ctdc-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy bento-ctdc + include_role: + name: bento-ctdc + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-bento-demo.yml b/ansible/deploy-bento-demo.yml new file mode 100644 index 000000000..b4ef14bf9 --- /dev/null +++ b/ansible/deploy-bento-demo.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento-demo + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-bento-file-downloader.yml b/ansible/deploy-bento-file-downloader.yml new file mode 100644 index 000000000..6a7fd5503 --- /dev/null +++ b/ansible/deploy-bento-file-downloader.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento-file-downloader pipeline + hosts: bento + connection: local + gather_facts: yes + + tasks: + - name: deploy stage + include_role: + name: bento-file-downloader + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-bento-gke.yml b/ansible/deploy-bento-gke.yml new file mode 100644 index 000000000..5bd8dcf27 --- /dev/null +++ 
b/ansible/deploy-bento-gke.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: gke + connection: local + gather_facts: yes + + tasks: + - name: deploy stage + include_role: + name: bento-gke + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-bento-icdc.yml b/ansible/deploy-bento-icdc.yml new file mode 100644 index 000000000..804c40cbd --- /dev/null +++ b/ansible/deploy-bento-icdc.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy bento-icdc + include_role: + name: bento-icdc + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-bento-ins.yml b/ansible/deploy-bento-ins.yml new file mode 100644 index 000000000..f1601c5a4 --- /dev/null +++ b/ansible/deploy-bento-ins.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy bento-icdc + include_role: + name: bento-ins + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-cloudone-ctdc.yml b/ansible/deploy-cloudone-ctdc.yml new file mode 100644 index 000000000..aff429958 --- /dev/null +++ b/ansible/deploy-cloudone-ctdc.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: ctdc-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy bento-ctdc + include_role: + name: bento-ctdc + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-cloudone-ecs.yml b/ansible/deploy-cloudone-ecs.yml new file mode 100644 index 000000000..8f5af88d9 --- /dev/null +++ b/ansible/deploy-cloudone-ecs.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of cloudone ecs pipeline + hosts: cicd + connection: local + gather_facts: yes + + tasks: + - name: deploy stage + include_role: + name: cloudone-ecs + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-ctdc.yml 
b/ansible/deploy-ctdc.yml new file mode 100644 index 000000000..48afc0eab --- /dev/null +++ b/ansible/deploy-ctdc.yml @@ -0,0 +1,9 @@ +--- +- name: build stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + roles: + - ctdc-pipeline + \ No newline at end of file diff --git a/ansible/deploy-frontend.yml b/ansible/deploy-frontend.yml new file mode 100644 index 000000000..fe55597c3 --- /dev/null +++ b/ansible/deploy-frontend.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: cicd + tasks_from: deploy-frontend \ No newline at end of file diff --git a/ansible/deploy-icdc-data-dictionary.yml b/ansible/deploy-icdc-data-dictionary.yml new file mode 100644 index 000000000..8836e3bd4 --- /dev/null +++ b/ansible/deploy-icdc-data-dictionary.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of icdc pipeline + hosts: icdc-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy stage + include_role: + name: icdc-data-dictionary + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-icdc-file-downloader.yml b/ansible/deploy-icdc-file-downloader.yml new file mode 100644 index 000000000..6495f9e94 --- /dev/null +++ b/ansible/deploy-icdc-file-downloader.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy stage + include_role: + name: icdc-file-downloader + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-icdc.yml b/ansible/deploy-icdc.yml new file mode 100644 index 000000000..1afb836c1 --- /dev/null +++ b/ansible/deploy-icdc.yml @@ -0,0 +1,9 @@ +--- +- name: build stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + roles: + - icdc-pipeline + \ No newline at end of file diff --git a/ansible/deploy-open-target-backend.yml 
b/ansible/deploy-open-target-backend.yml new file mode 100644 index 000000000..47a9e9c7b --- /dev/null +++ b/ansible/deploy-open-target-backend.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of ppdc open-target-backend + hosts: ppdc-otp-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy ppdc open-target-backend + include_role: + name: open-target-backend + tasks_from: deploy + diff --git a/ansible/deploy-ppdc-otg.yml b/ansible/deploy-ppdc-otg.yml new file mode 100644 index 000000000..5272330bf --- /dev/null +++ b/ansible/deploy-ppdc-otg.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of ppdc otg + hosts: ppdc-otg-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy ppdc-otg + include_role: + name: ppdc-otg + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/deploy-ppdc-otp.yml b/ansible/deploy-ppdc-otp.yml new file mode 100644 index 000000000..7e6338806 --- /dev/null +++ b/ansible/deploy-ppdc-otp.yml @@ -0,0 +1,12 @@ +--- +- name: deploy stage of ppdc otp + hosts: ppdc-otp-{{tier}} + gather_facts: yes + become: yes + + tasks: + - name: deploy ppdc-otp + include_role: + name: ppdc-otp + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/docker.yml b/ansible/docker.yml index 5bdc03399..110a1021a 100644 --- a/ansible/docker.yml +++ b/ansible/docker.yml @@ -1,8 +1,13 @@ --- - name: setup jenkins server - hosts: all + hosts: docker + gather_facts: no + connection: local become: yes - roles: - - setup-docker + - common + - docker + + + diff --git a/ansible/ecs-agent.yml b/ansible/ecs-agent.yml new file mode 100644 index 000000000..e1166b7a4 --- /dev/null +++ b/ansible/ecs-agent.yml @@ -0,0 +1,12 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - docker + - ecs-agent + \ No newline at end of file diff --git a/ansible/fail-bento-ctdc.yml b/ansible/fail-bento-ctdc.yml new file mode 100644 index 000000000..ef571073e --- 
/dev/null +++ b/ansible/fail-bento-ctdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: ctdc-{{tier}} + gather_facts: yes + + tasks: + - name: fail bento-ctdc deployment + include_role: + name: bento-ctdc + tasks_from: fail-build + \ No newline at end of file diff --git a/ansible/fail-bento-icdc.yml b/ansible/fail-bento-icdc.yml new file mode 100644 index 000000000..7acdac1a6 --- /dev/null +++ b/ansible/fail-bento-icdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + + tasks: + - name: fail bento-icdc deployment + include_role: + name: bento-icdc + tasks_from: fail-build + \ No newline at end of file diff --git a/ansible/file-monitor.yml b/ansible/file-monitor.yml new file mode 100644 index 000000000..7323fb41c --- /dev/null +++ b/ansible/file-monitor.yml @@ -0,0 +1,28 @@ +--- +- name: check file microservice status + hosts: localhost + gather_facts: yes + connection: local + + vars: + - urls: + dev: + tier: Dev + url: https://caninecommons-dev.cancer.gov/api/files/ping + + qa: + tier: QA + url: https://caninecommons-qa.cancer.gov/api/files/ping + stage: + tier: Stage + url: https://caninecommons-stage.cancer.gov/api/files/ping + + prod: + tier: Prod + url: https://caninecommons.cancer.gov/api/files/ping + + tasks: + - name: get url status + include_role: + name: url-monitor + loop: "{{lookup('dict', urls, wantlist=True)}}" diff --git a/ansible/gdc-build.yml b/ansible/gdc-build.yml new file mode 100644 index 000000000..315952a37 --- /dev/null +++ b/ansible/gdc-build.yml @@ -0,0 +1,11 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: This is a build stage + include_role: + name: bento-gdc + tasks_from: build \ No newline at end of file diff --git a/ansible/gdc-deploy.yml b/ansible/gdc-deploy.yml new file mode 100644 index 000000000..4c3811a76 --- /dev/null +++ b/ansible/gdc-deploy.yml @@ -0,0 +1,13 @@ +--- +- name: 
deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento-gdc + tasks_from: deploy + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/git-copy.yml b/ansible/git-copy.yml new file mode 100644 index 000000000..6f4dc79a6 --- /dev/null +++ b/ansible/git-copy.yml @@ -0,0 +1,9 @@ +--- +- name: copy git contents + hosts: localhost + connection: local + gather_facts: no + + roles: + - git-copy + \ No newline at end of file diff --git a/ansible/github-actions-runner.yml b/ansible/github-actions-runner.yml new file mode 100644 index 000000000..95a5aa423 --- /dev/null +++ b/ansible/github-actions-runner.yml @@ -0,0 +1,12 @@ +--- +- name: setup github-actions-runner + hosts: docker + gather_facts: no + connection: local + become: yes + + roles: + - github-actions-runner + + + diff --git a/ansible/gmb-build.yml b/ansible/gmb-build.yml new file mode 100644 index 000000000..d76c6eb07 --- /dev/null +++ b/ansible/gmb-build.yml @@ -0,0 +1,13 @@ +--- +- name: bento cicd pipeline + hosts: bento + connection: local + gather_facts: no + become: yes + + tasks: + - name: This is a build stage + include_role: + name: bento + tasks_from: build + vars_from: gmb \ No newline at end of file diff --git a/ansible/gmb-deploy.yml b/ansible/gmb-deploy.yml new file mode 100644 index 000000000..ca0e1e6da --- /dev/null +++ b/ansible/gmb-deploy.yml @@ -0,0 +1,14 @@ +--- +- name: deploy stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: bento + tasks_from: deploy + vars_from: gmb + vars: + data_bucket: nci-cbiit-ctdc-{{tier}} \ No newline at end of file diff --git a/ansible/hosts b/ansible/hosts new file mode 100644 index 000000000..8eccebc40 --- /dev/null +++ b/ansible/hosts @@ -0,0 +1,33 @@ +[cicd] +localhost + +[loader] +localhost + +[bento] +localhost + +[ppdc-otg] +localhost + 
+[ppdc-otp] +localhost + +[ccdi] +localhost + +[gke] +localhost + +[all] +localhost + +[docker] +localhost + +[ccdc] +localhost + +[OpenPedCan-api] +localhost + diff --git a/ansible/icdc-data-dump-push.yml b/ansible/icdc-data-dump-push.yml new file mode 100644 index 000000000..6f86d3fc3 --- /dev/null +++ b/ansible/icdc-data-dump-push.yml @@ -0,0 +1,12 @@ +--- +- name: push dump file to s3 + hosts: loader + connection: local + gather_facts: yes + + tasks: + - name: push dump file + include_role: + name: data-processing + tasks_from: icdc-data-dump-push + vars_from: icdc_data_dump \ No newline at end of file diff --git a/ansible/icdc-data-dump.yml b/ansible/icdc-data-dump.yml new file mode 100644 index 000000000..45ee0b0bb --- /dev/null +++ b/ansible/icdc-data-dump.yml @@ -0,0 +1,12 @@ +--- +- name: dump data from neo4j db + hosts: icdc-neo4j + gather_facts: yes + become: yes + + tasks: + - name: perform data dump + include_role: + name: data-processing + tasks_from: icdc-data-dump + vars_from: icdc_data_dump \ No newline at end of file diff --git a/ansible/icdc-data-loader.yml b/ansible/icdc-data-loader.yml new file mode 100644 index 000000000..ebbe5df72 --- /dev/null +++ b/ansible/icdc-data-loader.yml @@ -0,0 +1,16 @@ +--- +- name: load data to neo4j db + hosts: loader + connection: local + gather_facts: yes + become: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: icdc-data-loader + vars: + # data_bucket: nci-cbiit-ctdc-dev + ansible_python_interpreter: /usr/bin/python2.7 + data_bucket: nci-cbiit-caninedatacommons-dev \ No newline at end of file diff --git a/ansible/icdc-demo.yml b/ansible/icdc-demo.yml new file mode 100644 index 000000000..acca77475 --- /dev/null +++ b/ansible/icdc-demo.yml @@ -0,0 +1,14 @@ +--- +- name: setup icdc_demo server + hosts: demo + become: yes + + vars_files: + - config/icdc_demo-env-vars.yaml + + roles: + - common + - icdc_demo + # - docker-as-service + # - sumologic + # - newrelic \ 
No newline at end of file diff --git a/ansible/icdc-git-tag.yml b/ansible/icdc-git-tag.yml new file mode 100644 index 000000000..9db6f5ec3 --- /dev/null +++ b/ansible/icdc-git-tag.yml @@ -0,0 +1,22 @@ +- name: Tag icdc repository daily + + hosts: localhost + connection: local + + vars: + retention: 30 + date_regex: '([12]\d{3}_(0[1-9]|1[0-2])_(0[1-9]|[12]\d|3[01])_(0[1-9]|1[0-2])_([0-5]\d))' + current_date: "{{ lookup('pipe','date +%Y_%m_%d_%H_%M') }}" + git_tag: "{{current_date}}" + git_home: "{{ ansible_env.HOME }}/git-tag" + + vars_files: + - config/icdc-env-vars.yaml + + tasks: + - name: tag repository + include_role: + name: git-tag + vars: + git_url: https://{{git_username}}:{{git_password}}@github.com/CBIIT/icdc-codebase + diff --git a/ansible/icdc.yml b/ansible/icdc.yml new file mode 100644 index 000000000..13c68d20a --- /dev/null +++ b/ansible/icdc.yml @@ -0,0 +1,14 @@ +--- +- name: setup k9dc server + hosts: tomcat + become: yes + + vars_files: + - config/icdc-env-vars.yaml + + roles: + - common + - { role: docker,tags: ['sandbox'] } + - tomcat + #- sumologic + #- newrelic \ No newline at end of file diff --git a/ansible/inventory/aws_ec2.yml b/ansible/inventory/aws_ec2.yml new file mode 100644 index 000000000..3c2733f9f --- /dev/null +++ b/ansible/inventory/aws_ec2.yml @@ -0,0 +1,9 @@ +plugin: aws_ec2 +regions: + - us-east-1 +# keyed_groups may be used to create custom groups +strict: False +keyed_groups: + - key: tags.Name + separator: '' + \ No newline at end of file diff --git a/ansible/inventory/hosts b/ansible/inventory/hosts new file mode 100644 index 000000000..f5aacfc43 --- /dev/null +++ b/ansible/inventory/hosts @@ -0,0 +1,27 @@ +[cicd] +localhost + +[loader] +localhost + +[bento] +localhost + +[ppdc-otg] +localhost + +[ppdc-otp] +localhost + +[ccdi] +localhost + +[gke] +localhost + +[all] +localhost + +[docker] +localhost + diff --git a/ansible/jenkins.yml b/ansible/jenkins.yml new file mode 100644 index 000000000..99fdf3be2 --- /dev/null +++ 
b/ansible/jenkins.yml @@ -0,0 +1,20 @@ +--- +- name: setup jenkins server + hosts: jenkins + become: yes + + vars_files: + - config/icdc-env-vars.yaml + + # pre_tasks: + # - name: Wait 600 seconds for target connection to become reachable/usable + # wait_for_connection: + # - name: Gathering facts + # setup: + + roles: + - common + - sumologic + - newrelic + - docker + - jenkins diff --git a/ansible/k9dc.yml b/ansible/k9dc.yml deleted file mode 100644 index 9fa4ceff2..000000000 --- a/ansible/k9dc.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: setup k9dc server - hosts: k9dc - connection: local - become: yes - gather_facts: no - - roles: - - docker - - k9dc - - sumologic diff --git a/ansible/katalon.yml b/ansible/katalon.yml new file mode 100644 index 000000000..7e8789500 --- /dev/null +++ b/ansible/katalon.yml @@ -0,0 +1,9 @@ +--- +- name: execute katalon tests + hosts: localhost + connection: local + gather_facts: no + + roles: + - katalon + diff --git a/ansible/load-schema.yml b/ansible/load-schema.yml new file mode 100644 index 000000000..49986e617 --- /dev/null +++ b/ansible/load-schema.yml @@ -0,0 +1,20 @@ +--- +- name: load data to neo4j db + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-processing + tasks_from: schema + apply: + become: yes + become_user: bento + become_flag: -i + vars: + # data_bucket: nci-cbiit-ctdc-{{tier}} + ansible_python_interpreter: /usr/bin/python3 + \ No newline at end of file diff --git a/ansible/migrations.yml b/ansible/migrations.yml new file mode 100644 index 000000000..2eb4fd177 --- /dev/null +++ b/ansible/migrations.yml @@ -0,0 +1,11 @@ +--- +- name: migrate sumologic to fed + hosts: sumologic + become: yes + + vars_files: + - config/migrations.yml + + roles: + - migrations + diff --git a/terraform/icdc/playbook/nat.yml b/ansible/nat.yml similarity index 100% rename from terraform/icdc/playbook/nat.yml rename to ansible/nat.yml diff 
--git a/ansible/neo4j-ip.yml b/ansible/neo4j-ip.yml new file mode 100644 index 000000000..02fa42dba --- /dev/null +++ b/ansible/neo4j-ip.yml @@ -0,0 +1,12 @@ +- name: gather instance facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Name": "{{project}}-{{tier}}-neo4j-4" + "instance-state-name": running + "tag:Environment": "{{tier}}" + register: neo4j + +- name: set instance name + set_fact: + neo4j_ip: "{{ neo4j.instances[0].network_interfaces[0].private_ip_address }}" \ No newline at end of file diff --git a/ansible/neo4j-loader-icdc-get.yml b/ansible/neo4j-loader-icdc-get.yml new file mode 100644 index 000000000..4e04ad907 --- /dev/null +++ b/ansible/neo4j-loader-icdc-get.yml @@ -0,0 +1,11 @@ +--- +- name: get data for neo4j dataloader + hosts: loader + connection: local + gather_facts: yes + + tasks: + - name: get dump file + include_role: + name: neo4j-loader + tasks_from: neo4j-loader-icdc-get \ No newline at end of file diff --git a/ansible/neo4j-loader-icdc.yml b/ansible/neo4j-loader-icdc.yml new file mode 100644 index 000000000..e36994da5 --- /dev/null +++ b/ansible/neo4j-loader-icdc.yml @@ -0,0 +1,22 @@ +--- +- name: load data to neo4j db + hosts: icdc-neo4j + gather_facts: yes + become: yes + + tasks: + - name: perform data loading + include_role: + name: neo4j-loader + tasks_from: neo4j-loader-icdc + +- name: restart backend containers + hosts: icdc-hosts + gather_facts: yes + become: yes + + tasks: + - name: restart backend + include_role: + name: neo4j-loader + tasks_from: icdc-restart-backend \ No newline at end of file diff --git a/ansible/neo4j-loader.yml b/ansible/neo4j-loader.yml new file mode 100644 index 000000000..2640f56a2 --- /dev/null +++ b/ansible/neo4j-loader.yml @@ -0,0 +1,22 @@ +--- +- name: load data to neo4j db + hosts: neo4j + gather_facts: yes + become: yes + + tasks: + - name: perform data loading + include_role: + name: neo4j-loader + tasks_from: neo4j-loader + +- name: restart backend container + hosts: loader + 
connection: local + gather_facts: yes + + tasks: + - name: restart backend + include_role: + name: neo4j-loader + tasks_from: bento-restart-backend \ No newline at end of file diff --git a/ansible/neo4j-version-update.yml b/ansible/neo4j-version-update.yml new file mode 100644 index 000000000..3424aee9b --- /dev/null +++ b/ansible/neo4j-version-update.yml @@ -0,0 +1,11 @@ +--- +- name: upgrade neo4j server + hosts: neo4j + become: yes + #become_method: sudo + +# vars_files: +# - config/icdc-env-vars.yaml + + roles: + - neo4j-version-update diff --git a/ansible/neo4j-version4-update.yml b/ansible/neo4j-version4-update.yml new file mode 100644 index 000000000..0077a160f --- /dev/null +++ b/ansible/neo4j-version4-update.yml @@ -0,0 +1,12 @@ +--- +- name: upgrade neo4j server + hosts: localhost + become: yes + connection: local + #become_method: sudo + +# vars_files: +# - config/icdc-env-vars.yaml + + roles: + - neo4j-version-update diff --git a/ansible/neo4j.yml b/ansible/neo4j.yml index bc6d7f7bc..38b47a7df 100644 --- a/ansible/neo4j.yml +++ b/ansible/neo4j.yml @@ -1,10 +1,17 @@ --- - name: setup neo4j server hosts: neo4j - connection: local become: yes - gather_facts: no - + + vars_files: + - config/icdc-env-vars.yaml + roles: + - common + # - { role: docker,tags: ['sandbox'] } + # - docker - neo4j - - sumologic + # - sumologic + # - newrelic + # - sumologic-journalctl + diff --git a/ansible/newrelic.yml b/ansible/newrelic.yml deleted file mode 100644 index 721f83d39..000000000 --- a/ansible/newrelic.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: setup newrelic infrastructure - hosts: newrelic - connection: local - become: yes - gather_facts: no - - roles: - - newrelic - diff --git a/ansible/open-target-backend.yml b/ansible/open-target-backend.yml new file mode 100644 index 000000000..a7675ef2d --- /dev/null +++ b/ansible/open-target-backend.yml @@ -0,0 +1,18 @@ +--- +- name: deploy open-target-backend + hosts: localhost + connection: local + become: yes + + 
tasks: + - name: install common packages + include_role: + name: open-target-backend + tasks_from: common + + - name: deploy backend + include_role: + name: open-target-backend + tasks_from: backend + + diff --git a/ansible/open-target-database.yml b/ansible/open-target-database.yml new file mode 100644 index 000000000..6459bec2f --- /dev/null +++ b/ansible/open-target-database.yml @@ -0,0 +1,13 @@ +--- +- name: setup neo4j server + hosts: localhost + connection: local + become: yes + + tasks: + - name: setup clickhouse database + include_role: + name: open-target-backend + tasks_from: database + + diff --git a/ansible/pass-bento-ctdc.yml b/ansible/pass-bento-ctdc.yml new file mode 100644 index 000000000..f7608f284 --- /dev/null +++ b/ansible/pass-bento-ctdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: ctdc-{{tier}} + gather_facts: yes + + tasks: + - name: pass bento-ctdc deployment + include_role: + name: bento-ctdc + tasks_from: pass-build + \ No newline at end of file diff --git a/ansible/pass-bento-icdc.yml b/ansible/pass-bento-icdc.yml new file mode 100644 index 000000000..3d93e610a --- /dev/null +++ b/ansible/pass-bento-icdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + + tasks: + - name: fail bento-icdc deployment + include_role: + name: bento-icdc + tasks_from: pass-build + \ No newline at end of file diff --git a/ansible/redis-intergration.yml b/ansible/redis-intergration.yml new file mode 100644 index 000000000..b7f269131 --- /dev/null +++ b/ansible/redis-intergration.yml @@ -0,0 +1,11 @@ +--- +- name: check file microservice status + hosts: "{{env}}" + gather_facts: yes + become: yes + + tasks: + - name: get url status + include_role: + name: redis-intergration + \ No newline at end of file diff --git a/ansible/redis.yml b/ansible/redis.yml new file mode 100644 index 000000000..5a932ac30 --- /dev/null +++ b/ansible/redis.yml @@ -0,0 +1,12 @@ +--- +- name: Redis 
stage of bento pipeline + hosts: bento + connection: local + gather_facts: no + + tasks: + - name: flush redis cache + include_role: + name: redis + + \ No newline at end of file diff --git a/ansible/redis_icdc.yml b/ansible/redis_icdc.yml new file mode 100644 index 000000000..1a4e1bbba --- /dev/null +++ b/ansible/redis_icdc.yml @@ -0,0 +1,15 @@ +--- +- name: Redis stage of icdc pipeline + hosts: icdc + #connection: local + #gather_facts: no + gather_facts: yes + become: yes + + tasks: + - name: flush redis cache + include_role: + name: redis + tasks_from: redis_icdc + + \ No newline at end of file diff --git a/ansible/release-bento-ctdc.yml b/ansible/release-bento-ctdc.yml new file mode 100644 index 000000000..fea629925 --- /dev/null +++ b/ansible/release-bento-ctdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: ctdc-{{tier}} + gather_facts: yes + + tasks: + - name: release bento-ctdc + include_role: + name: bento-ctdc + tasks_from: release + \ No newline at end of file diff --git a/ansible/release-bento-icdc.yml b/ansible/release-bento-icdc.yml new file mode 100644 index 000000000..553a5e8e9 --- /dev/null +++ b/ansible/release-bento-icdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + + tasks: + - name: release bento-icdc + include_role: + name: bento-icdc + tasks_from: release + \ No newline at end of file diff --git a/ansible/roles/OpenPedCan/files/access_db.env b/ansible/roles/OpenPedCan/files/access_db.env new file mode 100644 index 000000000..646377ee5 --- /dev/null +++ b/ansible/roles/OpenPedCan/files/access_db.env @@ -0,0 +1,11 @@ +# Docker env vars for accessing database. + +# Environment file format reference: +# https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file + +# User for database read-only access. +# +# Note that password cannot contain : or \ for simplicity. 
+# https://www.postgresql.org/docs/current/libpq-pgpass.html +DB_USERNAME=rserver_admin +DB_PASSWORD=6Ny7oNY9qJLt \ No newline at end of file diff --git a/ansible/roles/OpenPedCan/files/common_db.env b/ansible/roles/OpenPedCan/files/common_db.env new file mode 100644 index 000000000..b7b9c63c4 --- /dev/null +++ b/ansible/roles/OpenPedCan/files/common_db.env @@ -0,0 +1,7 @@ +DB_PORT=5432 +DB_HOST=db +DB_DRIVER=PostgreSQL Unicode + +DB_NAME=open_ped_can_db +BULK_EXP_SCHEMA=bulk_expression +BULK_EXP_TPM_HISTOLOGY_TBL=bulk_expression_tpm_histology \ No newline at end of file diff --git a/ansible/roles/OpenPedCan/files/load_db.env b/ansible/roles/OpenPedCan/files/load_db.env new file mode 100644 index 000000000..5f2aeed22 --- /dev/null +++ b/ansible/roles/OpenPedCan/files/load_db.env @@ -0,0 +1,25 @@ +# Docker env vars for loading database. + +# User for loading database with read-write access. +# +# Note that password cannot contain : or \ for simplicity. +# Ref: https://www.postgresql.org/docs/current/libpq-pgpass.html +DB_READ_WRITE_USERNAME=my_db_rw_username +DB_READ_WRITE_PASSWORD=my_db_rw_user_password + +# The following env vars in this file cannot be changed without modifying +# other files, because various commands and scripts assume that they have the +# following values. + +# postgres docker image env vars. +# +# Ref: https://hub.docker.com/_/postgres +POSTGRES_USER=postgres +POSTGRES_DB=postgres +POSTGRES_PASSWORD=6Ny7oNY9qJLt + +POSTGRES_HOST_AUTH_METHOD=scram-sha-256 +POSTGRES_INITDB_ARGS=--auth-local=scram-sha-256 --auth-host=scram-sha-256 + +# Path to the directory that contain database build outputs in container. 
+BUILD_OUTPUT_DIR_PATH=/home/open-ped-can-api-db/db/build_outputs diff --git a/ansible/roles/OpenPedCan/tasks/build.yml b/ansible/roles/OpenPedCan/tasks/build.yml new file mode 100644 index 000000000..82c6b6e36 --- /dev/null +++ b/ansible/roles/OpenPedCan/tasks/build.yml @@ -0,0 +1,54 @@ +--- +- name: install docker and docker-compose + pip: + name: "{{item}}" + state: present + loop: + - docker + - docker-compose + +- name: create OpenPedCan-api-secrets directory + file: + path: "{{workspace}}/OpenPedCan-api-secrets" + state: directory + +- name: copy DB environment files + template: src={{item.src}} dest={{item.dest}} + with_items: + - { src: 'access_db.env.j2', dest: '{{workspace}}//OpenPedCan-api-secrets//access_db.env' } + - { src: 'common_db.env.j2', dest: '{{workspace}}//OpenPedCan-api-secrets//common_db.env' } + - { src: 'load_db.env.j2', dest: '{{workspace}}//OpenPedCan-api-secrets//load_db.env' } + +- name: login into ecr + #shell: "$(/bin/aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {{ecr}})" + shell: "docker login -u AWS -p $(aws ecr get-login-password --region us-east-1) {{ecr}}" + ignore_errors: True + register: ecr_login + +- name: listing the contents + shell: ls + register: shell_result + args: + chdir: "{{workspace}}/" + +- debug: + var: shell_result.stdout_lines + +- name: build {{ecr}}/OpenPedCan:HttpServer-{{version}} image + docker_image: + path: "{{workspace}}/{{project}}/" + dockerfile: "{{workspace}}/{{project}}/Dockerfile" + pull: yes + name: "{{ecr}}/openpedcan-dev-ecr" + tag: "httpserver-{{version}}" + push: yes + + +# - name: build {{ecr}}/OpenPedCan:DatabaseServer-{{version}} image +# docker_image: +# path: "{{workspace}}/{{project}}" +# dockerfile: "{{workspace}}/{{project}}/db/db.Dockerfile" +# pull: yes +# name: "{{ecr}}/openpedcan-dev-ecr" +# tag: "databaseserver-{{version}}" +# push: yes diff --git a/ansible/roles/OpenPedCan/tasks/deploy.yml 
b/ansible/roles/OpenPedCan/tasks/deploy.yml new file mode 100644 index 000000000..9a2e02ce2 --- /dev/null +++ b/ansible/roles/OpenPedCan/tasks/deploy.yml @@ -0,0 +1,271 @@ +--- +- name: create task OpenPedCan httpsServer definition + ecs_taskdefinition: + network_mode: bridge + family: "{{ stack_name }}-httpserver" + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + containers: + - name: httpserver + essential: true + image: "{{ecr}}/openpedcan-dev-ecr:httpserver-{{version}}" + environment: + - name: REACT_APP_ENVIRONMENT + value: "{{tier}}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ stack_name }}-backend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: true + # - name: RDB_HOST + # value: "{{ rds_host }}" + # - name: RDB_PORT + # value: "{{ rds_port }}" + # - name: RDB_USER + # value: "{{ rds_user }}" + # - name: RDB_PASSWORD + # value: "{{ rds_password }}" + - name: DB_READ_WRITE_USERNAME + value: "{{ db_user }}" + - name: DB_READ_WRITE_PASSWORD + value: "{{ db_password }}" + - name: POSTGRES_USER + value: "{{ db_user }}" + - name: POSTGRES_DB + value: "postgres" + - name: POSTGRES_PASSWORD + value: "{{ db_password }}" + - name: POSTGRES_HOST_AUTH_METHOD + value: "scram-sha-256" + - name: POSTGRES_INITDB_ARGS + value: "--auth-local=scram-sha-256 --auth-host=scram-sha-256" + - name: DB_PORT + value: "5432" + - name: DB_HOST + value: "{{ db_host }}" + - name: DB_DRIVER + value: "PostgreSQL Unicode" + - name: DB_NAME + value: "{{ db_user }}" + - name: DB_USERNAME + value: "{{ db_user }}" + - name: DB_PASSWORD + value: "{{ db_password }}" + - name: Driver + value: "PostgreSQL Unicode" + #value: "DB_DRIVER" + - name: Server + value: "{{ db_host }}" + - name: Port + value: "5432" + - name: Uid + value: "{{ db_user }}" + - name: Pwd + value: "{{ db_password }}" + 
- name: Database + value: "5432" + - name: BULK_EXP_SCHEMA + value: "BULK_EXP_SCHEMA" + - name: BULK_EXP_TPM_HISTOLOGY_TBL + value: "BULK_EXP_TPM_HISTOLOGY_TBL" + - name: BUILD_OUTPUT_DIR_PATH + value: "/home/open-ped-can-api-db/db/build_outputs" + - name: DB_HOME_DIR_PATH + value: "/home/open-ped-can-api-db/" + portMappings: + - containerPort: "8080" + hostPort: "8080" + protocol: tcp + register: task_output + +# - name: create task OpenPedCan Database Server definition {{workspace}} +# ecs_taskdefinition: +# network_mode: bridge +# family: "{{ stack_name }}-dbserver" +# memory: '1024' +# cpu: '512' +# state: present +# region: "{{region}}" +# containers: +# - name: dbserver +# essential: true +# image: "{{ecr}}/openpedcan-dev-ecr:databaseserver-{{version}}" +# environment: +# - name: REACT_APP_ENVIRONMENT +# value: "{{tier}}" +# - name: NEW_RELIC_LICENSE_KEY +# value: "{{ newrelic_license_key }}" +# - name: NEW_RELIC_APP_NAME +# value: "{{ stack_name }}-backend" +# - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED +# value: true +# - name: NEW_RELIC_HOST +# value: "gov-collector.newrelic.com" +# - name: NEW_RELIC_NO_CONFIG_FILE +# value: true +# # - name: RDB_HOST +# # value: "{{ rds_host }}" +# # - name: RDB_PORT +# # value: "{{ rds_port }}" +# # - name: RDB_USER +# # value: "{{ rds_user }}" +# # - name: RDB_PASSWORD +# # value: "{{ rds_password }}" +# - name: DB_READ_WRITE_USERNAME +# value: "{{ db_read_write_user }}" +# - name: DB_READ_WRITE_PASSWORD +# value: "{{ db_password }}" +# - name: POSTGRES_USER +# value: "{{ postgres_user }}" +# - name: POSTGRES_DB +# value: "postgres" +# - name: DB_NAME +# value: "open_ped_can_db" +# - name: POSTGRES_PASSWORD +# value: "{{ db_password }}" +# - name: POSTGRES_HOST_AUTH_METHOD +# value: "scram-sha-256" +# - name: POSTGRES_INITDB_ARGS +# value: "--auth-local=scram-sha-256 --auth-host=scram-sha-256" +# - name: DB_PORT +# value: "5432" +# - name: DB_HOST +# value: "{{ db_host }}" +# - name: DB_DRIVER +# value: "PostgreSQL 
Unicode" +# - name: DB_USERNAME +# value: "{{ db_user }}" +# - name: DB_PASSWORD +# value: "{{ db_password }}" +# - name: Driver +# value: "PostgreSQL Unicode" +# #value: "DB_DRIVER" +# - name: Server +# value: "{{ db_host }}" +# - name: Port +# value: "5432" +# - name: Uid +# value: "{{ uid }}" +# - name: Pwd +# value: "{{ db_password }}" +# - name: Database +# value: "5432" +# - name: BULK_EXP_SCHEMA +# value: "BULK_EXP_SCHEMA" +# - name: BULK_EXP_TPM_HISTOLOGY_TBL +# value: "BULK_EXP_TPM_HISTOLOGY_TBL" +# - name: BUILD_OUTPUT_DIR_PATH +# value: "/home/open-ped-can-api-db/db/build_outputs" +# - name: DB_HOME_DIR_PATH +# value: "/home/open-ped-can-api-db/" +# - name: DB_LOCATION +# value: "aws_s3" +# portMappings: +# - containerPort: "8081" +# hostPort: "8081" +# protocol: tcp +# register: task_output + +- name: query task definition HttpServer + ecs_taskdefinition_info: + task_definition: OpenPedCan-httpserver + region: "{{region}}" + register: task_httpserver + + +# - name: show task output +# debug: +# msg: "{{task_httpserver}}" + +# - name: query task definition DatabaseServer +# ecs_taskdefinition_info: +# task_definition: OpenPedCan-dbserver +# region: "{{region}}" +# register: task_databaseserver + +# - name: show service service +# debug: +# msg: "{{task_databaseserver}}" + +- name: query ecs service httpserver + ecs_service_facts: + cluster: OpenPedCan-dev + service: OpenPedCan-httpserver + details: true + region: "{{region}}" + register: service_httpserver + +# - name: show service service +# debug: +# msg: "{{service_httpserver}}" + +# - name: query ecs service DatabaseServer +# ecs_service_facts: +# cluster: OpenPedCan-dev +# service: OpenPedCan-dbserver +# details: true +# region: "{{region}}" +# register: service_databaseserver + +# - name: show service service +# debug: +# msg: "{{service_databaseserver}}" + +- name: set facts + set_fact: + httpserver_revision: "{{task_httpserver.revision}}" + #databaseserver_revision: 
"{{task_databaseserver.revision}}" + #task_databaseserver_name: "{{task_databaseserver.family}}" + task_httpserver_name: "{{task_httpserver.family}}" + lb_frontend: "{{service_httpserver.services[0].loadBalancers}}" + role_arn: "{{service_httpserver.services[0].roleArn}}" + #lb_dbserver: "{{service_databaseserver.services[0].loadBalancers}}" + + +- name: update httpserver service + ecs_service: + state: present + name: OpenPedCan-httpserver + cluster: OpenPedCan-dev + task_definition: "{{task_httpserver_name}}:{{httpserver_revision}}" + role: "{{role_arn}}" + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_httpserver_output + + +# - name: update databaseserver service +# ecs_service: +# state: present +# name: OpenPedCan-dbserver +# cluster: OpenPedCan-dev +# task_definition: "{{task_databaseserver_name}}:{{databaseserver_revision}}" +# role: "{{role_arn}}" +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# load_balancers: "{{ lb_dbserver }}" +# region: "{{region}}" +# register: service_processor_output + +- name: listing the contents + shell: ls + register: shell_result + args: + chdir: "{{workspace}}/" + +- debug: + var: shell_result.stdout_lines + diff --git a/ansible/roles/OpenPedCan/templates/access_db.env.j2 b/ansible/roles/OpenPedCan/templates/access_db.env.j2 new file mode 100644 index 000000000..5aade01af --- /dev/null +++ b/ansible/roles/OpenPedCan/templates/access_db.env.j2 @@ -0,0 +1,11 @@ +# Docker env vars for accessing database. + +# Environment file format reference: +# https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file + +# User for database read-only access. +# +# Note that password cannot contain : or \ for simplicity. 
+# https://www.postgresql.org/docs/current/libpq-pgpass.html +DB_USERNAME={{ db_user }} +DB_PASSWORD={{ db_password }} \ No newline at end of file diff --git a/ansible/roles/OpenPedCan/templates/common_db.env.j2 b/ansible/roles/OpenPedCan/templates/common_db.env.j2 new file mode 100644 index 000000000..b7b9c63c4 --- /dev/null +++ b/ansible/roles/OpenPedCan/templates/common_db.env.j2 @@ -0,0 +1,7 @@ +DB_PORT=5432 +DB_HOST=db +DB_DRIVER=PostgreSQL Unicode + +DB_NAME=open_ped_can_db +BULK_EXP_SCHEMA=bulk_expression +BULK_EXP_TPM_HISTOLOGY_TBL=bulk_expression_tpm_histology \ No newline at end of file diff --git a/ansible/roles/OpenPedCan/templates/load_db.env.j2 b/ansible/roles/OpenPedCan/templates/load_db.env.j2 new file mode 100644 index 000000000..f767d1986 --- /dev/null +++ b/ansible/roles/OpenPedCan/templates/load_db.env.j2 @@ -0,0 +1,25 @@ +# Docker env vars for loading database. + +# User for loading database with read-write access. +# +# Note that password cannot contain : or \ for simplicity. +# Ref: https://www.postgresql.org/docs/current/libpq-pgpass.html +DB_READ_WRITE_USERNAME={{ db_user }} +DB_READ_WRITE_PASSWORD={{ db_password }} + +# The following env vars in this file cannot be changed without modifying +# other files, because various commands and scripts assume that they have the +# following values. + +# postgres docker image env vars. +# +# Ref: https://hub.docker.com/_/postgres +POSTGRES_USER={{ db_user }} +POSTGRES_DB=postgres +POSTGRES_PASSWORD={{ db_password }} + +POSTGRES_HOST_AUTH_METHOD=scram-sha-256 +POSTGRES_INITDB_ARGS=--auth-local=scram-sha-256 --auth-host=scram-sha-256 + +# Path to the directory that contain database build outputs in container. 
+BUILD_OUTPUT_DIR_PATH=/home/open-ped-can-api-db/db/build_outputs diff --git a/ansible/roles/OpenPedCan/vars/main.yml b/ansible/roles/OpenPedCan/vars/main.yml new file mode 100644 index 000000000..0cadd5403 --- /dev/null +++ b/ansible/roles/OpenPedCan/vars/main.yml @@ -0,0 +1,24 @@ +--- +# vars file for cicd + +platform: aws +stack_name: OpenPedCan +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +project: OpenPedCan-api +ecr: "{{ lookup('aws_ssm', 'OpenPedCad-api-ecr', region='us-east-1' ) }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +db_user: "{{ lookup('aws_ssm', 'OpenPedCan-api_dbuser', region='us-east-1' ) }}" +db_password: "{{ lookup('aws_ssm', 'OpenPedCan-api_dbpass', region='us-east-1' ) }}" +db_host: "{{ lookup('aws_ssm', 'OpenPedCan-api-db-host', region='us-east-1' ) }}" +db_read_write_user: "openpedcan" +postgres_user: "postgres" +uid: "rserver" +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/alb-ops/.travis.yml b/ansible/roles/alb-ops/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/alb-ops/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + 
+script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/k9dc/README.md b/ansible/roles/alb-ops/README.md similarity index 100% rename from ansible/roles/k9dc/README.md rename to ansible/roles/alb-ops/README.md diff --git a/ansible/roles/alb-ops/defaults/main.yml b/ansible/roles/alb-ops/defaults/main.yml new file mode 100644 index 000000000..8632a6036 --- /dev/null +++ b/ansible/roles/alb-ops/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for alb-ops + +account_id: "{{ lookup('env','ACCOUNT_ID') }}" +response_text: > + <\/head>
<\/div>
We'll be right back!<\/span>


Sorry, we're down for maintenance activity right now. Please check back later or contact ICDCHelpDesk@mail.nih.gov<\/a><\/span><\/div>
<\/div><\/div><\/div><\/div><\/html> \ No newline at end of file diff --git a/ansible/roles/alb-ops/handlers/main.yml b/ansible/roles/alb-ops/handlers/main.yml new file mode 100644 index 000000000..281558aed --- /dev/null +++ b/ansible/roles/alb-ops/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for alb-ops \ No newline at end of file diff --git a/ansible/roles/alb-ops/meta/main.yml b/ansible/roles/alb-ops/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/alb-ops/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 
+ # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/alb-ops/tasks/info.yml b/ansible/roles/alb-ops/tasks/info.yml new file mode 100644 index 000000000..45a280861 --- /dev/null +++ b/ansible/roles/alb-ops/tasks/info.yml @@ -0,0 +1,80 @@ +--- + +- name: gather info about alb + elb_application_lb_info: + names: + - "{{alb|json_query(jq)}}" + region: "{{region}}" + register: alb_info + +- name: set facts + set_fact: + certificate_arn: "{{alb_info.load_balancers[0].listeners[0].certificates[0].certificate_arn}}" + http_listener_arn: "{{alb_info.load_balancers[0].listeners[1].listener_arn}}" + https_listener_arn: "{{alb_info.load_balancers[0].listeners[0].listener_arn}}" + backend_arn: "{{backend_target|json_query(jq)}}" + frontend_arn: "{{frontend_target|json_query(jq)}}" + +- debug: + msg: "{{alb_info}}" + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + loop: + - { src: 'fixed-actions.json.j2', dest: "{{workspace}}/fixed-actions.json" } + - { src: 'fixed-conditions.json.j2',dest: "{{workspace}}/fixed-conditions.json"} + - { src: 'backend-conditions.json.j2',dest: "{{workspace}}/backend-conditions.json"} + - { src: 'frontend-conditions.json.j2',dest: "{{workspace}}/frontend-conditions.json"} + - { src: 'backend-actions.json.j2', dest: "{{workspace}}/backend-actions.json" } + - { src: 'frontend-actions.json.j2', dest: "{{workspace}}/frontend-actions.json" } + + +# - name: create fixed response rule +# shell: > +# aws elbv2 create-rule +# --listener-arn {{listener_arn}} +# --region {{region}} +# --priority 1 +# --conditions file://{{workspace}}/fixed-conditions.json +# --actions file://{{workspace}}/fixed-actions.json +# register: fixed_rule + +# - name: store rule fixed response arn to file +# copy: +# content: "{{ 
fixed_rule.stdout}}" +# dest: "{{workspace}}/{{project}}-{{tier}}-fixed.json" + +# - name: create test frontend rule +# shell: > +# aws elbv2 create-rule +# --listener-arn {{listener_arn}} +# --region {{region}} +# --priority 1 +# --conditions file://{{workspace}}/frontend-conditions.json +# --actions file://{{workspace}}/frontend-actions.json +# register: frontend_rule + +# - name: store frontend test rule arn to file +# copy: +# content: "{{ frontend_rule.stdout}}" +# dest: "{{workspace}}/{{project}}-{{tier}}-frontend.json" + +# - name: create test backend rule +# shell: > +# aws elbv2 create-rule +# --listener-arn {{listener_arn}} +# --region {{region}} +# --priority 1 +# --conditions file://{{workspace}}/backend-conditions.json +# --actions file://{{workspace}}/backend-actions.json +# register: backend_rule + +# - name: store backend test rule arn to file +# copy: +# content: "{{ backend_rule.stdout}}" +# dest: "{{workspace}}/{{project}}-{{tier}}-backend.json" + + + diff --git a/ansible/roles/alb-ops/tasks/main.yml b/ansible/roles/alb-ops/tasks/main.yml new file mode 100644 index 000000000..e1fd09997 --- /dev/null +++ b/ansible/roles/alb-ops/tasks/main.yml @@ -0,0 +1,2 @@ + +# tasks file for alb-ops diff --git a/ansible/roles/alb-ops/tasks/mytest.yml b/ansible/roles/alb-ops/tasks/mytest.yml new file mode 100644 index 000000000..a130e631b --- /dev/null +++ b/ansible/roles/alb-ops/tasks/mytest.yml @@ -0,0 +1,11 @@ +--- +- name: ReadJsonfile + hosts: localhost + tasks: + - name: Display the JSON file content + shell: cat dev-rule-arn.json + register: result + + - name: save the Json data to a Variable as a Fact + set_fact: + jsondata: "{{ result.stdout | from_json }}" \ No newline at end of file diff --git a/ansible/roles/alb-ops/tasks/remove.yml b/ansible/roles/alb-ops/tasks/remove.yml new file mode 100644 index 000000000..a25cefcc9 --- /dev/null +++ b/ansible/roles/alb-ops/tasks/remove.yml @@ -0,0 +1,82 @@ + +- name: get {{item}} rule file content + command: cat 
{{workspace}}/{{project}}-{{tier}}-{{item}}.json + register: result + +- name: retrieve {{item}} rule-arn + set_fact: + jsondata: "{{ result.stdout | from_json }}" + +- name: remove {{item}} alb rule + command: > + aws elbv2 delete-rule + --rule-arn {{jsondata|json_query('Rules[].RuleArn')|join('')}} + --region {{region}} + +- name: purge {{item}} rule-arn.txt file + file: + path: "{{workspace}}/{{project}}-{{tier}}-{{item}}.json" + state: absent + + +# - name: get backend rule file content +# command: cat {{workspace}}/{{project}}-{{tier}}-backend.json +# register: backed_result + +# - name: retrieve backend rule-arn +# set_fact: +# jsondata: "{{ backend_result.stdout | from_json }}" + +# - name: remove backend alb rule +# command: > +# aws elbv2 delete-rule +# --rule-arn {{jsondata|json_query('Rules[].RuleArn')|join('')}} +# --region {{region}} +# register: rule + +# - name: purge fixed rule-arn.txt file +# file: +# path: "{{workspace}}/{{project}}-{{tier}}-backend.json" +# state: absent + + +# - name: get fixed response file content +# command: cat {{workspace}}/{{project}}-{{tier}}-fixed.json +# register: fixed_result + +# - name: retrieve rule-arn +# set_fact: +# jsondata: "{{ fixed_result.stdout | from_json }}" + +# - name: remove fixed response alb rule +# command: > +# aws elbv2 delete-rule +# --rule-arn {{jsondata|json_query('Rules[].RuleArn')|join('')}} +# --region {{region}} + +# - name: purge rule-arn.txt file +# file: +# path: "{{workspace}}/{{project}}-{{tier}}-fixed.json" +# state: absent + + +# - name: get frontend {{tier}}-rule-arn file content +# command: cat {{workspace}}/{{project}}-{{tier}}-frontend.json +# register: frontend_result + + + +# - name: retrieve frontend rule-arn +# set_fact: +# jsondata: "{{ frontend_result.stdout | from_json }}" + +# - name: remove frontend alb rule +# command: > +# aws elbv2 delete-rule +# --rule-arn {{jsondata|json_query('Rules[].RuleArn')|join('')}} +# --region {{region}} + +# - name: purge frontend 
rule-arn.txt file +# file: +# path: "{{workspace}}/{{project}}-{{tier}}-frontend.json" +# state: absent \ No newline at end of file diff --git a/ansible/roles/alb-ops/tasks/rules.yml b/ansible/roles/alb-ops/tasks/rules.yml new file mode 100644 index 000000000..9faec9ba3 --- /dev/null +++ b/ansible/roles/alb-ops/tasks/rules.yml @@ -0,0 +1,14 @@ +- name: create {{item.name}} response rule + shell: > + aws elbv2 create-rule + --listener-arn {{item.listener_arn}} + --region {{region}} + --priority {{ item.priority }} + --conditions file://{{workspace}}/{{item.name}}-conditions.json + --actions file://{{workspace}}/{{item.name}}-actions.json + register: rule + +- name: store rule {{item.name}} response arn to file + copy: + content: "{{ rule.stdout}}" + dest: "{{workspace}}/{{project}}-{{tier}}-{{item.name}}.json" \ No newline at end of file diff --git a/ansible/roles/alb-ops/templates/backend-actions.json.j2 b/ansible/roles/alb-ops/templates/backend-actions.json.j2 new file mode 100644 index 000000000..d32ccf493 --- /dev/null +++ b/ansible/roles/alb-ops/templates/backend-actions.json.j2 @@ -0,0 +1,9 @@ +[{ + "Type": "forward", + "ForwardConfig": { + "TargetGroups": [{ + "TargetGroupArn": "{{backend_arn}}", + "Weight": 1 + }] + } +}] diff --git a/ansible/roles/alb-ops/templates/backend-conditions.json.j2 b/ansible/roles/alb-ops/templates/backend-conditions.json.j2 new file mode 100644 index 000000000..2b2f1cbfb --- /dev/null +++ b/ansible/roles/alb-ops/templates/backend-conditions.json.j2 @@ -0,0 +1,48 @@ +{% if tier == "prod" %} +[ + { + "Field": "source-ip", + "SourceIpConfig": { + "Values": [ + "128.231.0.0/16" + ] + } + }, + { + "Field": "host-header", + "HostHeaderConfig": { + "Values": ["caninecommons.cancer.gov"] + } + }, + { + "Field": "path-pattern", + "PathPatternConfig": { + "Values": ["/v1/graphql/*"] + } + } +] +{% else %} +[ + + { + "Field": "source-ip", + "SourceIpConfig": { + "Values": [ + "128.231.0.0/16" + ] + } + }, + { + "Field": "host-header", + 
"HostHeaderConfig": { + "Values": ["caninecommons-{{tier}}.cancer.gov"] + } + }, + { + "Field": "path-pattern", + "PathPatternConfig": { + "Values": ["/v1/graphql/*"] + } + } +] +{% endif %} diff --git a/ansible/roles/alb-ops/templates/fixed-actions.json.j2 b/ansible/roles/alb-ops/templates/fixed-actions.json.j2 new file mode 100644 index 000000000..61698749a --- /dev/null +++ b/ansible/roles/alb-ops/templates/fixed-actions.json.j2 @@ -0,0 +1,10 @@ +[ + { + "Type": "fixed-response", + "FixedResponseConfig": { + "MessageBody": "{{response_text}}", + "StatusCode": "200", + "ContentType": "text/html" + } + } +] diff --git a/ansible/roles/alb-ops/templates/fixed-conditions.json.j2 b/ansible/roles/alb-ops/templates/fixed-conditions.json.j2 new file mode 100644 index 000000000..82284d13a --- /dev/null +++ b/ansible/roles/alb-ops/templates/fixed-conditions.json.j2 @@ -0,0 +1,19 @@ +{% if tier == "prod" %} +[ + { + "Field": "host-header", + "HostHeaderConfig": { + "Values": ["caninecommons.cancer.gov"] + } + } +] +{% else %} +[ + { + "Field": "host-header", + "HostHeaderConfig": { + "Values": ["caninecommons-{{tier}}.cancer.gov"] + } + } +] +{% endif %} diff --git a/ansible/roles/alb-ops/templates/frontend-actions.json.j2 b/ansible/roles/alb-ops/templates/frontend-actions.json.j2 new file mode 100644 index 000000000..22d1fc765 --- /dev/null +++ b/ansible/roles/alb-ops/templates/frontend-actions.json.j2 @@ -0,0 +1,9 @@ +[{ + "Type": "forward", + "ForwardConfig": { + "TargetGroups": [{ + "TargetGroupArn": "{{frontend_arn}}", + "Weight": 1 + }] + } +}] diff --git a/ansible/roles/alb-ops/templates/frontend-conditions.bak b/ansible/roles/alb-ops/templates/frontend-conditions.bak new file mode 100644 index 000000000..4b76a6877 --- /dev/null +++ b/ansible/roles/alb-ops/templates/frontend-conditions.bak @@ -0,0 +1,19 @@ +{% if tier == "prod" %} +[ + { + "Field": "path-pattern", + "PathPatternConfig": { + "Values": ["/*"] + } + } +] +{% else %} +[ + { + "Field": "path-pattern", + 
"PathPatternConfig": { + "Values": ["/*"] + } + } +] +{% endif %} diff --git a/ansible/roles/alb-ops/templates/frontend-conditions.json.j2 b/ansible/roles/alb-ops/templates/frontend-conditions.json.j2 new file mode 100644 index 000000000..075b5a1c7 --- /dev/null +++ b/ansible/roles/alb-ops/templates/frontend-conditions.json.j2 @@ -0,0 +1,47 @@ +{% if tier == "prod" %} +[ + { + "Field": "source-ip", + "SourceIpConfig": { + "Values": [ + "128.231.0.0/16" + ] + } + }, + { + "Field": "host-header", + "HostHeaderConfig": { + "Values": ["caninecommons.cancer.gov"] + } + }, + { + "Field": "path-pattern", + "PathPatternConfig": { + "Values": ["/*"] + } + } +] +{% else %} +[ + { + "Field": "source-ip", + "SourceIpConfig": { + "Values": [ + "128.231.0.0/16" + ] + } + }, + { + "Field": "host-header", + "HostHeaderConfig": { + "Values": ["caninecommons-{{tier}}.cancer.gov"] + } + }, + { + "Field": "path-pattern", + "PathPatternConfig": { + "Values": ["/*"] + } + } +] +{% endif %} diff --git a/ansible/roles/alb-ops/templates/maintenance_message_externalized_css.html b/ansible/roles/alb-ops/templates/maintenance_message_externalized_css.html new file mode 100644 index 000000000..cf39bef26 --- /dev/null +++ b/ansible/roles/alb-ops/templates/maintenance_message_externalized_css.html @@ -0,0 +1 @@ +
diff --git a/ansible/roles/k9dc/tests/inventory b/ansible/roles/alb-ops/tests/inventory similarity index 100% rename from ansible/roles/k9dc/tests/inventory rename to ansible/roles/alb-ops/tests/inventory diff --git a/ansible/roles/alb-ops/tests/test.yml b/ansible/roles/alb-ops/tests/test.yml new file mode 100644 index 000000000..343446716 --- /dev/null +++ b/ansible/roles/alb-ops/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - alb-ops \ No newline at end of file diff --git a/ansible/roles/alb-ops/vars/main.yml b/ansible/roles/alb-ops/vars/main.yml new file mode 100644 index 000000000..83d565126 --- /dev/null +++ b/ansible/roles/alb-ops/vars/main.yml @@ -0,0 +1,32 @@ +--- +# vars file for alb-ops +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +# project: "{{ lookup('env','PROJECT') }}" +# action: "{{ lookup('env','ACTION') }}" +prority: + +region: us-east-1 +jq: "{{project}}.{{tier}}" +alb: + icdc: + dev: DEV-A-Appli-Caninedata-8UHLKJYN + qa: QA-Ca-Appli-6P2VXOVVW4SD + stage: canin-Appli-EXXC73ANOU8B + prod: PROD-Appli-88K1Y11U2APF + ctdc: + dev: "test" + +backend_target: + icdc: + dev: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/DEV-A-Canine-commons-http-tg/654e7fcf56dc13ef + qa: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/QA-Ca-Canin-FM2RHRD6VVY/d293b03da689a5bb + stage: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/canin-Canin-1OHVRPX6UZ350/d1b608b3022efd53 + prod: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/PROD-Canin-3J4F8ZFL5OUQ/17dca99e24f359b3 +frontend_target: + icdc: + dev: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/DEV-A-Canine-commons-http-80-tg/16b915b2e610fe58 + qa: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/QA-Da-canine-commons-http-tg/936393a0ff307c71 + stage: 
arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/STG-C-Canine-commons-http-80-tg/77be25a563e14fe2 + prod: arn:aws:elasticloadbalancing:us-east-1:{{account_id}}:targetgroup/PROD-Canin-ZFL5OUQ-http-80-tg/b418ad4b49f2b5d2 + diff --git a/ansible/roles/bento-ccdi/.travis.yml b/ansible/roles/bento-ccdi/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-ccdi/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-ccdi/README.md b/ansible/roles/bento-ccdi/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-ccdi/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/bento-ccdi/defaults/main.yml b/ansible/roles/bento-ccdi/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/bento-ccdi/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/bento-ccdi/files/inject.template.js b/ansible/roles/bento-ccdi/files/inject.template.js new file mode 100644 index 000000000..9c7915530 --- /dev/null +++ b/ansible/roles/bento-ccdi/files/inject.template.js @@ -0,0 +1,11 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', +}; diff --git a/ansible/roles/bento-ccdi/files/nginx-entrypoint.sh b/ansible/roles/bento-ccdi/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/bento-ccdi/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + 
+WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/bento-ccdi/handlers/main.yml b/ansible/roles/bento-ccdi/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-ccdi/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-ccdi/meta/main.yml b/ansible/roles/bento-ccdi/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-ccdi/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. 
Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/bento-ccdi/tasks/build.yml b/ansible/roles/bento-ccdi/tasks/build.yml new file mode 100644 index 000000000..fcb2fbbca --- /dev/null +++ b/ansible/roles/bento-ccdi/tasks/build.yml @@ -0,0 +1,180 @@ +--- +- name: set dev environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "dev" + +- name: set qa environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_PERF_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "perf" + +- name: set prod environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_PROD_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{workspace}}/src/main/resources/application.properties.j2" + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: create graphql directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/graphql" + +- name: create yaml directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/yaml" + +- name: copy schema from frontend to resources + template: + src: 
"{{workspace}}/CCDI-Portal-WebPortal/graphql/{{ schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ schema_file }}" + +- name: verify redis schema file exists + stat: + path: "{{workspace}}/CCDI-Portal-WebPortal/graphql/{{ redis_schema_file }}" + register: redis_schema + +- name: verify redis init queries file exists + stat: + path: "{{workspace}}/CCDI-Portal-WebPortal/yaml/{{ redis_init_queries_file }}" + register: redis_queries + +- name: verify test queries file exists + stat: + path: "{{workspace}}/CCDI-Portal-WebPortal/yaml/{{ test_queries_file }}" + register: test_queries + +- name: copy redis schema from frontend to resources + template: + src: "{{workspace}}/CCDI-Portal-WebPortal/graphql/{{ redis_schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ redis_schema_file }}" + when: redis_schema.stat.exists + +- name: copy redis init queries from frontend to resources + template: + src: "{{workspace}}/CCDI-Portal-WebPortal/yaml/{{ redis_init_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ redis_init_queries_file }}" + when: redis_queries.stat.exists + +- name: copy test queries from frontend to resources + template: + src: "{{workspace}}/CCDI-Portal-WebPortal/yaml/{{ test_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ test_queries_file }}" + when: test_queries.stat.exists + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/{{stack_name}}-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{stack_name}}-backend + tag: 
"{{backend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/in-backend image + docker_image: + name: "cbiitssrepo/{{stack_name}}-backend:{{backend_version}}-{{build_number}}" + repository: cbiitssrepo/{{stack_name}}-backend:latest + force_tag: yes + push: yes + source: local +########################################### +- name: remove .env + file: + state: absent + path: "{{workspace}}/CCDI-Portal-WebPortal/.env" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx.conf' + dest: '{{workspace}}/CCDI-Portal-WebPortal/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/CCDI-Portal-WebPortal/nginx-entrypoint.sh" + mode: 0755 +# - name: copy environment file to {{workspace}}/bento-frontend +# template: +# src: env.j2 +# dest: "{{workspace}}/bento-frontend/.env" +- name: run npm install in {{workspace}}/CCDI-Portal-WebPortal/ + command: "{{item}}" + args: + chdir: "{{workspace}}/CCDI-Portal-WebPortal" + loop: + # - npm install npm@7.7.6 -g + - npm install + - npm run build + +- name: copy env to dist + copy: + # remote_src: yes + src: inject.template.js + dest: "{{workspace}}/CCDI-Portal-WebPortal/dist/inject.template.js" + mode: 0755 + +- name: build cbiitssrepo/{{stack_name}}-frontend image + docker_image: + build: + path: "{{workspace}}/CCDI-Portal-WebPortal" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-icdc-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{stack_name}}-frontend + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/in-frontend image + docker_image: + name: "cbiitssrepo/{{stack_name}}-frontend:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/{{stack_name}}-frontend:latest + force_tag: yes + push: yes + source: local + + + + \ No 
newline at end of file diff --git a/ansible/roles/bento-ccdi/tasks/deploy.yml b/ansible/roles/bento-ccdi/tasks/deploy.yml new file mode 100644 index 000000000..24de4145e --- /dev/null +++ b/ansible/roles/bento-ccdi/tasks/deploy.yml @@ -0,0 +1,430 @@ +--- +############################################################################################################################ + +# Task Definitions + +############################################################################################################################ + +- name: create task definition - {{stack_name}}-backend + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/{{stack_name}}-backend:{{backend_version}}" + environment: + - name: NEO4J_URL + value: bolt://{{ neo4j_ip }}:7687 + - name: NEO4J_USER + value: "{{ neo4j_user }}" + - name: NEO4J_PASSWORD + value: "{{ neo4j_password }}" + - name: NEO4J_GRAPHQL_ENDPOINT + value: http://{{ neo4j_ip }}:7474/graphql/ + - name: NEO4J_AUTHORIZATION + value: "{{ neo4j_bearer }}" + - name: BENTO_API_VERSION + value: "{{backend_version}}" + - name: REDIS_ENABLE + value: "{{ enable_redis }}" + - name: REDIS_USE_CLUSTER + value: "{{ use_cluster }}" + - name: REDIS_HOST + value: "{{ redis_host[tier] }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: REDIS_FILTER_ENABLE + value: "{{ enable_redis_filter }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + # - name: NEW_RELIC_APP_NAME + # value: "{{ app_name }}-backend" + # - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + # value: true + # - name: NEW_RELIC_HOST + # value: "gov-collector.newrelic.com" + # - name: NEW_RELIC_LOG_FILE_NAME + # value: "STDOUT" + # - name: JAVA_OPTS + # value: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + # entryPoint: + # - "/bin/ash" + # - "-c" + # - 'wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip && 
bin/catalina.sh run' + portMappings: + - containerPort: "8080" + hostPort: "8080" + # logConfiguration: + # logDriver: syslog + # options: + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-backend" + # syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-backend" + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + +- name: create task definition - in-frontend + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "cbiitssrepo/{{stack_name}}-frontend:{{frontend_version}}" + environment: + - name: REACT_APP_BACKEND_API + value: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/v1/graphql/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/v1/graphql/{% endif %}" + - name: REACT_APP_FILE_SERVICE_API + value: "{% if tier == 'prod' %}https://bento-tools.org/api/files/{% else %}https://{{ tier }}.bento-tools.org/api/files/{% endif %}" + - name: REACT_APP_BE_VERSION + value: "{{backend_version}}" + - name: REACT_APP_FE_VERSION + value: "{{frontend_version}}" + - name: REACT_APP_ABOUT_CONTENT_URL + value: "{{ backend_content_url }}" + # - name: NEW_RELIC_LICENSE_KEY + # value: "{{ newrelic_license_key }}" + # - name: NEW_RELIC_APP_NAME + # value: "{{ app_name }}-frontend" + # - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + # value: true + # - name: NEW_RELIC_HOST + # value: "gov-collector.newrelic.com" + # - name: NEW_RELIC_NO_CONFIG_FILE + # value: true + portMappings: + - containerPort: "80" + hostPort: "80" + # logConfiguration: + # logDriver: syslog + # options: + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-frontend" + # syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-frontend" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +# - name: create task definition - sumo syslog +# ecs_taskdefinition: +# 
containers: +# - name: sumologic-syslog +# essential: true +# image: "sumologic/collector:latest-syslog" +# environment: +# - name: SUMO_COLLECTOR_NAME +# value: "{{ app_name }}-syslog" +# - name: SUMO_ACCESS_ID +# value: "{{ sumo_access_id }}" +# - name: SUMO_ACCESS_KEY +# value: "{{ sumo_access_key }}" +# - name: SUMO_COLLECTOR_NAME_PREFIX +# value: "" +# - name: SUMO_CLOBBER +# value: "true" +# portMappings: +# - containerPort: "514" +# hostPort: "514" +# network_mode: bridge +# family: bento-{{tier}}-sumo_syslog +# state: present +# memory: '512' +# cpu: '128' +# region: "{{region}}" +# register: task_output + +# - name: create task definition - sumo docker +# ecs_taskdefinition: +# containers: +# - name: sumologic-docker +# essential: true +# image: "sumologic/collector:latest" +# environment: +# - name: SUMO_COLLECTOR_NAME +# value: "{{ app_name }}-docker" +# - name: SUMO_ACCESS_ID +# value: "{{ sumo_access_id }}" +# - name: SUMO_ACCESS_KEY +# value: "{{ sumo_access_key }}" +# - name: SUMO_COLLECTOR_NAME_PREFIX +# value: "" +# - name: SUMO_CLOBBER +# value: "true" +# mountPoints: +# - containerPath: /var/run/docker.sock +# sourceVolume: docker-sock +# readOnly: true +# volumes: +# - name: docker-sock +# host: +# sourcePath: /var/run/docker.sock +# network_mode: bridge +# family: bento-{{tier}}-sumo_docker +# state: present +# memory: '512' +# cpu: '128' +# region: "{{region}}" +# register: task_output + +# - name: create task definition - newrelic docker +# ecs_taskdefinition: +# containers: +# - name: newrelic-docker +# essential: true +# image: "newrelic/infrastructure-bundle:latest" +# environment: +# - name: NRIA_LICENSE_KEY +# value: "{{ newrelic_license_key }}" +# - name: NRIA_DISPLAY_NAME +# value: "{{ app_name }}-docker" +# - name: NEW_RELIC_HOST +# value: "gov-collector.newrelic.com" +# - name: STATUS_URL +# value: "{% if tier == 'prod' %}https://bento-tools.org/nginx_status{% else %}https://{{ tier }}.bento-tools.org/nginx_status{% endif %}" +# 
entryPoint: +# - "/bin/ash" +# - "-c" +# - 'echo -e "integrations:\n - name: nri-nginx\n env:\n REMOTE_MONITORING: true\n METRICS: 1" > /etc/newrelic-infra/integrations.d/nginx-config.yml && /sbin/tini -- /usr/bin/newrelic-infra-service' +# mountPoints: +# - containerPath: /var/run/docker.sock +# sourceVolume: docker-sock +# readOnly: true +# - containerPath: /host +# sourceVolume: docker-host +# readOnly: true +# volumes: +# - name: docker-sock +# host: +# sourcePath: /var/run/docker.sock +# - name: docker-host +# host: +# sourcePath: / +# network_mode: bridge +# family: bento-{{tier}}-nr_docker +# state: present +# memory: '128' +# cpu: '128' +# region: "{{region}}" +# register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - bento frontend + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-frontend" + region: "{{region}}" + register: task_frontend + +- name: query task definition - bento backend + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-backend" + region: "{{region}}" + register: task_backend + +- name: query task definition - bento file downloader + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-file-downloader" + region: "{{region}}" + register: task_file_downloader + + +# - name: query task definition - sumologic syslog +# ecs_taskdefinition_info: +# task_definition: bento-{{tier}}-sumo_syslog +# region: "{{region}}" +# register: task_sumo_syslog + +# - name: query task definition - sumologic docker +# ecs_taskdefinition_info: +# task_definition: bento-{{tier}}-sumo_docker +# region: "{{region}}" +# register: task_sumo_docker + +# - name: query task definition - newrelic docker +# ecs_taskdefinition_info: +# 
task_definition: bento-{{tier}}-nr_docker +# region: "{{region}}" +# register: task_nr_docker + +############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-backend" + details: true + region: "{{region}}" + register: service_backend + +- name: query ecs service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-frontend" + details: true + region: "{{region}}" + register: service_frontend + +- name: query file downloader service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-file-downloader" + details: true + region: "{{region}}" + register: service_file_downloader + +# - name: query sumologic syslog service +# ecs_service_info: +# cluster: bento-{{tier}} +# service: bento-{{tier}}-sumo_syslog +# details: true +# region: "{{region}}" +# register: service_sumo_syslog + +# - name: query sumologic docker service +# ecs_service_info: +# cluster: bento-{{tier}} +# service: bento-{{tier}}-sumo_docker +# details: true +# region: "{{region}}" +# register: service_sumo_docker + +# - name: query newrelic docker service +# ecs_service_info: +# cluster: bento-{{tier}} +# service: bento-{{tier}}-nr_docker +# details: true +# region: "{{region}}" +# register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_url: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/{% endif %}" + frontend_revision: "{{task_frontend.revision}}" + backend_revision: 
"{{task_backend.revision}}" + file_downloader_revision: "{{task_file_downloader.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + task_file_downloader_name: "{{task_file_downloader.family}}" + # task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + # task_sumo_docker_name: "{{task_sumo_docker.family}}" + # task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + lb_file_downloader: "{{service_file_downloader.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + +- name: show + debug: + msg: "{{role_arn}}" + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +# - name: update sumologic syslog service +# ecs_service: +# state: present +# name: bento-{{tier}}-sumo_syslog +# cluster: bento-{{tier}} +# task_definition: "{{task_sumo_syslog_name}}" +# force_new_deployment: no +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# region: "{{region}}" +# register: service_sumo_syslog_output + +# - name: update sumologic docker service +# ecs_service: +# state: present +# name: bento-{{tier}}-sumo_docker +# cluster: bento-{{tier}} +# task_definition: "{{task_sumo_docker_name}}" +# force_new_deployment: no +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# region: "{{region}}" +# register: service_sumo_docker_output + +# - name: update newrelic docker service +# ecs_service: +# state: present +# name: bento-{{tier}}-nr_docker +# cluster: bento-{{tier}} +# task_definition: "{{task_nr_docker_name}}" +# force_new_deployment: no +# 
deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# region: "{{region}}" +# register: service_nr_docker_output + +- name: update backend service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-backend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output + +- name: update frontend service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-frontend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +- name: update file downloader service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-file-downloader" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_file_downloader_name}}:{{file_downloader_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_file_downloader }}" + region: "{{region}}" + register: service_file_downloader_output \ No newline at end of file diff --git a/terraform/icdc/modules/eip/variables.tf b/ansible/roles/bento-ccdi/tasks/main.yml similarity index 100% rename from terraform/icdc/modules/eip/variables.tf rename to ansible/roles/bento-ccdi/tasks/main.yml diff --git a/ansible/roles/bento-ccdi/tasks/redis.yml b/ansible/roles/bento-ccdi/tasks/redis.yml new file mode 100644 index 000000000..46787e625 --- /dev/null +++ 
b/ansible/roles/bento-ccdi/tasks/redis.yml @@ -0,0 +1,5 @@ +- name: confirm redis redis_host + debug: + msg: "{{redis_host[tier]}}" +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL ASYNC" | redis-cli -h {{ redis_host[tier]}} -p 6379 -c diff --git a/ansible/roles/bento-ccdi/templates/env.j2 b/ansible/roles/bento-ccdi/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/bento-ccdi/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/bento-ccdi/templates/nginx-config.yml.j2 b/ansible/roles/bento-ccdi/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/bento-ccdi/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/bento-ccdi/tests/inventory b/ansible/roles/bento-ccdi/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-ccdi/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-ccdi/tests/test.yml b/ansible/roles/bento-ccdi/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-ccdi/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git 
a/ansible/roles/bento-ccdi/vars/main.yml b/ansible/roles/bento-ccdi/vars/main.yml new file mode 100644 index 000000000..0b9440634 --- /dev/null +++ b/ansible/roles/bento-ccdi/vars/main.yml @@ -0,0 +1,43 @@ +--- +# vars file for cicd +stack_name: ccdi +enable_redis: true +platform: aws +redis_host: + dev: "ccdi-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + qa: "localhost" + perf: "localhost" + icdc: "localhost" + prod: "localhost" +redis_password: "" +redis_port: 6379 +enable_redis_filter: "{{ lookup('env','ENABLE_REDIS_FILTER') }}" +use_cluster: 'false' +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: bento-{{platform}}-{{tier}} + +#### bento-icdc changes ######################################################################################################### +schema_file: "{% if tier == 'icdc' %}icdc.graphql{% else %}bento-extended.graphql{% endif %}" +################################################################################################################################# + +frontend_version: "{{ lookup('env','FE_VERSION') }}" +backend_version: "{{ lookup('env','BE_VERSION') }}" +bento_api_version: "{{ backend_version }}" +backend_frontend_version: "{{frontend_version }}" +backend_content_url: https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{ tier }}/aboutPagesContent.yaml +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" 
+neo4j_bearer: "{{ lookup('env','BEARER') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" + +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/.travis.yml b/ansible/roles/bento-cloud-run/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-cloud-run/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/README.md b/ansible/roles/bento-cloud-run/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-cloud-run/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/bento-cloud-run/defaults/main.yml b/ansible/roles/bento-cloud-run/defaults/main.yml new file mode 100644 index 000000000..9085d3dfc --- /dev/null +++ b/ansible/roles/bento-cloud-run/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +platform: aws \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/handlers/main.yml b/ansible/roles/bento-cloud-run/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-cloud-run/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/meta/main.yml b/ansible/roles/bento-cloud-run/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-cloud-run/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) 
+ + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/tasks/build.yml b/ansible/roles/bento-cloud-run/tasks/build.yml new file mode 100644 index 000000000..8c083fdae --- /dev/null +++ b/ansible/roles/bento-cloud-run/tasks/build.yml @@ -0,0 +1,142 @@ +--- +- name: install wget and curl + yum: + name: + - wget + - curl + - java-11-openjdk-devel + +- name: install maven and nodejs to build + + shell: > + wget https://www-us.apache.org/dist/maven/maven-3/{{maven_version}}/binaries/apache-maven-{{maven_version}}-bin.tar.gz -P /tmp + && tar xf /tmp/apache-maven-{{maven_version}}-bin.tar.gz -C /usr/local + && ln -f -s /usr/local/apache-maven-{{maven_version}} /usr/local/maven + && curl -sL https://rpm.nodesource.com/setup_13.x | bash - + && yum install -y nodejs + args: + warn: False + +- name: clone backend github + git: + repo: 'https://github.com/CBIIT/bento-backend' + dest: "{{workspace}}/backend" + force: yes + version: Integration + +- name: clone frontend github + git: + repo: 'https://github.com/CBIIT/bento-frontend' + dest: "{{workspace}}/frontend" + force: yes + version: integration + +# - name: gather neo4j facts +# ec2_instance_facts: +# region: "{{region}}" +# filters: +# "tag:Name": "{{stack_name}}-{{database_asg_name}}" +# "instance-state-name": running +# register: database + +- name: set neo4j bearer + shell: echo "Basic $(echo -n "neo4j:{{neo4j_password}}" | base64)" + register: output_bearer + +- name: set instance ip + set_fact: + # neo4j_ip: "{{ database.instances[0].network_interfaces[0].private_ip_address }}" + bearer: "{{output_bearer.stdout_lines | first}}" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/backend/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/backend/src/main/resources/application.properties" + +- name: build backend 
code + command: /usr/local/maven/bin/mvn package -DskipTests + args: + chdir: "{{workspace}}/backend" + +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/backend/target/Bento-0.0.1.war" + dest: "{{workspace}}/backend/target/ROOT.war" + +- name: copy nginx conf + copy: + remote_src: yes + src: "{{workspace}}/backend/dockerfiles/nginx.conf" + dest: "{{workspace}}/frontend/nginx.conf" + +- name: login to docker + expect: + command: gcloud auth configure-docker + responses: + Do you want to continue (Y/n)?: Y + +- name: build custodian/bento-backend image + docker_image: + build: + path: "{{workspace}}/backend" + dockerfile: "{{workspace}}/backend/dockerfiles/backend-dockerfile" + pull: yes + name: gcr.io/{{gcp_project}}/bento-backend + tag: latest + force_tag: yes + source: build + force_source: yes + push: yes + +- name: copy environment file to {{workspace}}/bento-frontend + template: + src: env.j2 + dest: "{{workspace}}/frontend/.env" + +- name: run npm install in {{workspace}}/bento-frontend/ + shell: "{{item}}" + args: + chdir: "{{workspace}}/frontend" + ignore_errors: True + loop: + - npm install + +- name: run npm build in frontend + shell: npm run-script build + args: + chdir: "{{workspace}}/frontend" + ignore_errors: True + +- name: build custodian/bento-frontend image + docker_image: + build: + path: "{{workspace}}/frontend" + dockerfile: "{{workspace}}/backend/dockerfiles/frontend-cloudrun-dockerfile" + pull: yes + nocache: yes + name: gcr.io/{{gcp_project}}/bento-frontend + tag: latest + force_tag: yes + source: build + force_source: yes + push: yes + +# - name: post schemas +# uri: +# url: http://{{neo4j_ip}}:7474/graphql/idl/ +# method: POST +# body: "{{ lookup('file','{{workspace}}/backend/src/main/resources/graphql/bento-extended.graphql') }}" +# headers: +# Accept: "application/json" +# Authorization: "{{bearer}}" +# register: schema + +# - name: schema output +# debug: +# msg: "{{schema}}" \ No newline at end of 
file diff --git a/terraform/icdc/modules/route-53/outputs.tf b/ansible/roles/bento-cloud-run/tasks/main.yml similarity index 100% rename from terraform/icdc/modules/route-53/outputs.tf rename to ansible/roles/bento-cloud-run/tasks/main.yml diff --git a/ansible/roles/bento-cloud-run/templates/application.properties.j2 b/ansible/roles/bento-cloud-run/templates/application.properties.j2 new file mode 100644 index 000000000..dbd24364b --- /dev/null +++ b/ansible/roles/bento-cloud-run/templates/application.properties.j2 @@ -0,0 +1,10 @@ +spring.mvc.throw-exception-if-no-handler-found=true +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.graphql.endpoint.schema_endpoint=idl/ +graphql.schema=graphql/bento-extended.graphql +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +allow_grapqh_query = true +allow_graphql_mutation =false \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/templates/env.j2 b/ansible/roles/bento-cloud-run/templates/env.j2 new file mode 100644 index 000000000..cb72ecd20 --- /dev/null +++ b/ansible/roles/bento-cloud-run/templates/env.j2 @@ -0,0 +1,3 @@ +REACT_APP_BACKEND_API=https://bento-cloudrun-backend-rcghjjgqfa-uk.a.run.app/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml diff --git a/ansible/roles/bento-cloud-run/tests/inventory b/ansible/roles/bento-cloud-run/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-cloud-run/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-cloud-run/tests/test.yml b/ansible/roles/bento-cloud-run/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-cloud-run/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + 
remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-cloud-run/vars/main.yml b/ansible/roles/bento-cloud-run/vars/main.yml new file mode 100644 index 000000000..040cdcd62 --- /dev/null +++ b/ansible/roles/bento-cloud-run/vars/main.yml @@ -0,0 +1,19 @@ +--- +# vars file for cicd +# tier: "{{ lookup('env','TIER') }}" +# version: "{{ lookup('env','TAG') }}" +# workspace: "{{ lookup('env','WORKSPACE') }}" +# docker_user: "{{ lookup('env','DOCKER_USER') }}" +# docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +# build_number: "{{ lookup('env','BUILD_NUMBER')}}" +# region: us-east-1 +# newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +# sumo_collector_url: "{{ lookup('aws_ssm', 'sumo_collector_url', region='us-east-1' ) }}" +# app_name: bento-{{platform}}-{{tier}} +#hostname: "{{ansible_fqdn}}" + +workspace: "/tmp/workspace" +region: us-east-1 +maven_version: 3.6.3 +version: master +stack_name: bento \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/.travis.yml b/ansible/roles/bento-ctdc/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-ctdc/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/README.md b/ansible/roles/bento-ctdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ 
b/ansible/roles/bento-ctdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento-ctdc/defaults/main.yml b/ansible/roles/bento-ctdc/defaults/main.yml new file mode 100644 index 000000000..20627cd7f --- /dev/null +++ b/ansible/roles/bento-ctdc/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for cicd +platform: aws +release: fail +project: ctdc +prod: prod + diff --git a/ansible/roles/bento-ctdc/files/inject.template.js b/ansible/roles/bento-ctdc/files/inject.template.js new file mode 100644 index 000000000..302bf3a1a --- /dev/null +++ b/ansible/roles/bento-ctdc/files/inject.template.js @@ -0,0 +1,12 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', + REACT_APP_FILE_SERVICE_API: '${REACT_APP_FILE_SERVICE_API}', +}; diff --git a/ansible/roles/bento-ctdc/files/nginx-entrypoint.sh b/ansible/roles/bento-ctdc/files/nginx-entrypoint.sh new file mode 100644 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/bento-ctdc/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/handlers/main.yml b/ansible/roles/bento-ctdc/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-ctdc/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file 
diff --git a/ansible/roles/bento-ctdc/meta/main.yml b/ansible/roles/bento-ctdc/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-ctdc/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/tasks/build.yml b/ansible/roles/bento-ctdc/tasks/build.yml new file mode 100644 index 000000000..e31b95a67 --- /dev/null +++ b/ansible/roles/bento-ctdc/tasks/build.yml @@ -0,0 +1,205 @@ +--- +################################################################################## + +- name: get backend commit ID + shell: git rev-parse HEAD + args: + chdir: "{{workspace}}" + register: backend_tag + +- name: get frontend commit ID + shell: git rev-parse HEAD + args: + chdir: "{{workspace}}/ctdc-frontend" + register: frontend_tag + +- name: echo backend tag + debug: + msg: "{{ backend_tag.stdout_lines }}" + +- name: echo frontend tag + debug: + msg: "{{ frontend_tag.stdout_lines }}" + +################################################################################## + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{workspace}}/src/main/resources/application.properties.j2" + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: create graphql directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/graphql" + +- name: create yaml directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/yaml" + +- name: copy schema from frontend to resources + template: + src: "{{workspace}}/ctdc-frontend/graphql/{{ schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ schema_file }}" + +- name: verify redis schema file exists + stat: + path: "{{workspace}}/ctdc-frontend/graphql/{{ redis_schema_file }}" + register: redis_schema + +- name: copy redis schema from frontend to resources + template: + src: "{{workspace}}/ctdc-frontend/graphql/{{ redis_schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ redis_schema_file 
}}" + when: redis_schema.stat.exists + +- name: verify redis init queries file exists + stat: + path: "{{workspace}}/ctdc-frontend/yaml/{{ redis_init_queries_file }}" + register: redis_queries + +- name: copy redis init queries from frontend to resources + template: + src: "{{workspace}}/ctdc-frontend/yaml/{{ redis_init_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ redis_init_queries_file }}" + when: redis_queries.stat.exists + +- name: verify test queries file exists + stat: + path: "{{workspace}}/ctdc-frontend/yaml/{{ test_queries_file }}" + register: test_queries + +- name: copy test queries from frontend to resources + template: + src: "{{workspace}}/ctdc-frontend/yaml/{{ test_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ test_queries_file }}" + when: test_queries.stat.exists + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: log into ncidockerhub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + registry: https://ncidockerhub.nci.nih.gov + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + + +- name: build cbiitssrepo/ctdc-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/backend-ctdc-dockerfile" + pull: yes + nocache: yes + name: ncidockerhub.nci.nih.gov/icdc/ctdc-backend + tag: "{{backend_version}}" + push: yes + force_source: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: 
/local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + +- name: Add tag latest to cbiitssrepo/ctdc-backend image + docker_image: + name: "ncidockerhub.nci.nih.gov/icdc/ctdc-backend:{{backend_version}}" + repository: ncidockerhub.nci.nih.gov/icdc/ctdc-backend:latest + force_tag: yes + push: yes + source: local + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + +- name: remove .env + file: + state: absent + path: "{{workspace}}/ctdc-frontend/.env" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx.conf' + dest: '{{workspace}}/ctdc-frontend/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/ctdc-frontend/nginx-entrypoint.sh" + mode: 0755 + +- name: run npm install in {{workspace}}/ctdc-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/ctdc-frontend" + loop: + - npm install npm@latest -g + - npm install + - npm run build + +- name: copy env to dist + copy: + # remote_src: yes + src: inject.template.js + dest: "{{workspace}}/ctdc-frontend/dist/inject.template.js" + mode: 0755 + +- name: build cbiitssrepo/ctdc-frontend image + docker_image: + build: + path: "{{workspace}}/ctdc-frontend" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-ctdc-dockerfile" + pull: yes + nocache: yes + name: ncidockerhub.nci.nih.gov/icdc/ctdc-frontend + tag: "{{frontend_version}}" + push: yes + force_source: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: 
"{{docker_host}}" + +- name: Add tag latest to cbiitssrepo/ctdc-frontend image + docker_image: + name: "ncidockerhub.nci.nih.gov/icdc/ctdc-frontend:{{frontend_version}}" + repository: ncidockerhub.nci.nih.gov/icdc/ctdc-frontend:latest + force_tag: yes + push: yes + source: local + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + diff --git a/ansible/roles/bento-ctdc/tasks/deploy.yml b/ansible/roles/bento-ctdc/tasks/deploy.yml new file mode 100644 index 000000000..690287a31 --- /dev/null +++ b/ansible/roles/bento-ctdc/tasks/deploy.yml @@ -0,0 +1,86 @@ +--- +- name: ensure libselinux-python3 is installed + command: yum -y install libselinux-python3 + +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + registry: https://ncidockerhub.nci.nih.gov + +- name: verify host + file: + path: /tmp/myworkspace + state: directory + + +################################################################### + +- name: remove frontend container + docker_container: + name: frontend + state: absent + +- name: remove backend container + docker_container: + name: backend + state: absent + +- name: clean up + shell: docker system prune -a -f + +- name: ensure log and docker directory exists + file: + path: "{{item.dest}}" + state: directory + owner: "{{item.owner}}" + loop: + - { dest: '/local/content/docker', owner: 'root' } + - { dest: '/local/content/ctdc/logs',owner: 'tomcat' } + - { dest: '/local/content/ctdc/file-downloader', owner: 'root' } + - { dest: '/local/content/ctdc/nginx', owner: 'root' } + + + +- name: update serivces and compose files + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + loop: + - {src: 'app.yml.j2',dest: '/local/content/docker/app.yml'} + - {src: 'app.service.j2',dest: 
'/etc/systemd/system/app.service'} + - {src: 'app.timer.j2',dest: '/etc/systemd/system/app.timer'} + +- name: start frontend and backend containers + docker_compose: + project_src: /local/content/docker + files: app.yml + state: present + +- name: reload systemd + systemd: + daemon_reload: yes + +- name: enable log-agents and app.timer services + service: + name: app.timer + enabled: yes + +- name: ensure app.service is disabled + service: + name: app.service + enabled: no + + +- name: "wait for {{ frontend_url }} to become available" + uri: + url: "{{ frontend_url }}" + follow_redirects: none + method: GET + register: _result + until: ('status' in _result) and (_result.status == 200) + retries: 100 + delay: 10 + + + diff --git a/ansible/roles/bento-ctdc/tasks/fail-build.yml b/ansible/roles/bento-ctdc/tasks/fail-build.yml new file mode 100644 index 000000000..17b3b25b7 --- /dev/null +++ b/ansible/roles/bento-ctdc/tasks/fail-build.yml @@ -0,0 +1,51 @@ +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + registry: https://ncidockerhub.nci.nih.gov + +# - name: remove old deployment +# docker_compose: +# project_src: /local/content/docker +# files: log-agents.yml +# state: absent + +# - name: remove old deployment +# docker_compose: +# project_src: /local/content/docker +# files: app.yml +# state: absent + +- name: check if prev_app.yml exist + stat: + path: "/local/content/docker/prev_app.yml" + register: prev_app + + +- name: rename prev_app.yml app.yml file + copy: + src: "/local/content/docker/prev_app.yml" + dest: "/local/content/docker/app.yml" + remote_src: yes + when: prev_app.stat.exists or release == "fail" + +- name: start frontend and backend containers + docker_compose: + project_src: /local/content/docker + files: app.yml + state: present + +- name: Pause to allow updates to process + pause: + seconds: 15 + +- name: "wait for {{ frontend_url }} to become available" + uri: + url: "{{ frontend_url 
}}" + follow_redirects: none + method: GET + register: _result + until: ('status' in _result) and (_result.status == 200) + retries: 100 + delay: 10 + diff --git a/ansible/roles/bento-ctdc/tasks/main.yml b/ansible/roles/bento-ctdc/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-ctdc/tasks/pass-build.yml b/ansible/roles/bento-ctdc/tasks/pass-build.yml new file mode 100644 index 000000000..6ab460219 --- /dev/null +++ b/ansible/roles/bento-ctdc/tasks/pass-build.yml @@ -0,0 +1,18 @@ + +- name: ensure /local/content/docker directory exist + file: + path: /local/content/docker + state: directory + +- name: check if app.yml exist + stat: + path: "/local/content/docker/app.yml" + register: prev_app + + +- name: save previous app.yml file + copy: + src: "/local/content/docker/app.yml" + dest: "/local/content/docker/prev_app.yml" + remote_src: yes + when: not prev_app.stat.exists or release == "pass" diff --git a/ansible/roles/bento-ctdc/tasks/stop_site.yml b/ansible/roles/bento-ctdc/tasks/stop_site.yml new file mode 100644 index 000000000..068d32ca1 --- /dev/null +++ b/ansible/roles/bento-ctdc/tasks/stop_site.yml @@ -0,0 +1,41 @@ +--- + +- name: verify host + file: + path: /tmp/myworkspace + state: directory + +- name: clean up + shell: docker system prune -a -f + +- name: ensure /local/content/docker exists + file: + path: /local/content/docker + state: directory + + +##### UPDATED TO REMOVE ALL CONTAINERS ##### +#- name: start log agents +- name: stop log agents + docker_compose: + project_src: /local/content/docker + files: log-agents.yml + #state: present + state: absent + + +#- name: start frontend and backend containers +- name: stop frontend and backend containers + docker_compose: + project_src: /local/content/docker + files: app.yml + #state: present + state: absent + +- name: get docker output + shell: docker ps -a + register: docker_out + +- name: echo docker output + debug: + msg: "{{ docker_out.stdout_lines }}" \ 
No newline at end of file diff --git a/ansible/roles/bento-ctdc/templates/app.service.j2 b/ansible/roles/bento-ctdc/templates/app.service.j2 new file mode 100644 index 000000000..8860dae1d --- /dev/null +++ b/ansible/roles/bento-ctdc/templates/app.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=Docker Compose Application Service +Requires=docker.service +After=docker.service + +[Service] +Type=oneshot +RemainAfterExit=yes +WorkingDirectory=/local/content/docker + +#User=tomcat +#Group=tomcat + +ExecStart=/usr/bin/docker-compose -f app.yml up -d +ExecStop=/usr/bin/docker-compose -f app.yml down +#TimeoutStartSec=60 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/templates/app.timer.j2 b/ansible/roles/bento-ctdc/templates/app.timer.j2 new file mode 100644 index 000000000..60d0752c6 --- /dev/null +++ b/ansible/roles/bento-ctdc/templates/app.timer.j2 @@ -0,0 +1,8 @@ +[Unit] +Description=Run foo weekly and on boot + +[Timer] +OnStartupSec=120 + +[Install] +WantedBy=timers.target \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/templates/app.yml.j2 b/ansible/roles/bento-ctdc/templates/app.yml.j2 new file mode 100644 index 000000000..c39799bdb --- /dev/null +++ b/ansible/roles/bento-ctdc/templates/app.yml.j2 @@ -0,0 +1,60 @@ +version: '3.4' +services: + +################################################ +# backend container +################################################ + bento-backend: + container_name: backend + image: ncidockerhub.nci.nih.gov/icdc/ctdc-backend:{{backend_version}} + environment: + NEO4J_URL: bolt://{{ neo4j_ip }}:7687 + NEO4J_USER: "{{ neo4j_user }}" + NEO4J_PASSWORD: "{{ neo4j_password }}" + NEO4J_GRAPHQL_ENDPOINT: http://{{ neo4j_ip }}:7474/graphql/ + BENTO_API_VERSION: "{{backend_version}}" + REDIS_ENABLE: "{{ enable_redis }}" + REDIS_USE_CLUSTER: "{{ use_cluster }}" + REDIS_HOST: "{{ redis_host[tier] }}" + REDIS_PORT: "{{ redis_port }}" + REDIS_FILTER_ENABLE: "{{ 
enable_redis_filter }}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-backend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_LOG_FILE_NAME: "STDOUT" + JAVA_OPTS: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + entrypoint: ["/bin/ash", "-c", 'if [ ! -f /usr/local/tomcat/newrelic/newrelic.jar ]; then wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip; fi && bin/catalina.sh run'] + volumes: + - /local/content/ctdc/logs:/usr/local/tomcat/logs + ports: + - "8080:8080" + restart: always + +########################################################## +# frontend container +########################################################## + + bento-frontend: + container_name: frontend + image: ncidockerhub.nci.nih.gov/icdc/ctdc-frontend:{{frontend_version}} + environment: + REACT_APP_BACKEND_GETUSERINFO_API: "{{ backend_user_info }}" + REACT_APP_LOGIN_URL: "{{ backend_fence_login }}" + REACT_APP_USER_LOGOUT_URL: "{{ backend_fence_logout }}" + REACT_APP_BACKEND_API: "{% if tier == prod %}https://trialcommons.cancer.gov/v1/graphql/{% else %}https://trialcommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %}" + REACT_APP_ABOUT_CONTENT_URL: "{{ backend_content_url }}" + REACT_APP_BE_VERSION: "{{ backend_version }}" + REACT_APP_FE_VERSION: "{{ frontend_version }}" + REACT_APP_GA_TRACKING_ID: "{{ backend_google_analytics_id }}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-frontend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_NO_CONFIG_FILE: "true" + REACT_APP_FILE_SERVICE_API: "{% if tier == prod %}https://trialcommons.cancer.gov/api/files/{% else %}https://trialcommons-{{ tier }}.cancer.gov/api/files/{% endif %}" + volumes: + 
- "/local/content/ctdc/nginx:/var/log/nginx" + ports: + - "80:80" + restart: always diff --git a/ansible/roles/bento-ctdc/tests/inventory b/ansible/roles/bento-ctdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-ctdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-ctdc/tests/test.yml b/ansible/roles/bento-ctdc/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-ctdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-ctdc/vars/main.yml b/ansible/roles/bento-ctdc/vars/main.yml new file mode 100644 index 000000000..1590cc2d1 --- /dev/null +++ b/ansible/roles/bento-ctdc/vars/main.yml @@ -0,0 +1,64 @@ +--- +# vars file for cicd +#env_file_home: /local/content/docker +#tls_hostname: "{{ lookup('env','TLS_HOSTNAME') }}" +#fence_id: "{{ lookup('env','FENCE_ID') }}" +#home: /local/content/docker +#dev_alb_dns_name: internal-dev-a-appli-caninedata-8uhlkjyn-1830173970.us-east-1.elb.amazonaws.com +#frontend_url: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/{% else %}https://caninecommons-{{ tier }}.cancer.gov/{% endif %}" + +workspace: "{{ lookup('env','WORKSPACE') }}" +tier: "{{ lookup('env','TIER') }}" +platform: "cloudone" +app_name: ctdc-{{ platform }}-{{ tier }} +region: us-east-1 + +# redis config +enable_redis: 'false' +use_cluster: 'false' +redis_host: + dev: "10.208.3.27" + qa: "10.208.9.72" + stage: "10.208.21.143" + prod: "10.208.25.49" + +es_host: "" +redis_password: "" +redis_port: 6379 + +# versions +frontend_git_tag: "{{ lookup('env','FRONTEND_GIT_TAG') }}" +frontend_version: "{{ lookup('env','FE_VERSION') }}" +backend_git_tag: "{{ lookup('env','BACKEND_GIT_TAG') }}" +backend_version: "{{ lookup('env','BE_VERSION') }}" +bento_api_version: "{{ backend_version }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" 
+ +# docker +docker_host: "{{ lookup('env','DOCKER_HOST') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +tls_hostname: "{{ lookup('env','TLS_HOSTNAME') }}" + +# neo4j +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" + +# monitoring +newrelic_license_key: "{{ lookup('env','NEWRELIC_LIC_KEY')}}" +sumo_access_id: "{{ lookup('env','SUMO_ACCESS_ID')}}" +sumo_access_key: "{{ lookup('env','SUMO_ACCESS_KEY')}}" +syslog_host: "{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}" + +# backend +schema_file: ctdc.graphql +backend_user_info: https://caninecommons.cancer.gov/fence/login/ +backend_fence_login: https://nci-crdc-staging.datacommons.io/user/oauth2/authorize?client_id={{ fence_id }}&response_type=code&redirect_uri=https%3A%2F%2Fcaninecommons.cancer.gov%2F&scope=openid%20user +backend_fence_logout: https://caninecommons.cancer.gov/fence/logout +backend_content_url: "{% if tier == prod %}https://raw.githubusercontent.com/CBIIT/ctdc-frontend/master/src/content/prod/aboutPagesContent.yaml{% else %}https://raw.githubusercontent.com/CBIIT/ctdc-frontend/master/src/content/pre-prod/aboutPagesContent.yaml{% endif %}" +indexd_url: "{{ indexd_url }}" +backend_google_analytics_id: UA-154442677-1 + +#frontend url +frontend_url: "{% if tier == 'prod' %}https://trialcommons.cancer.gov/{% else %}https://trialcommons-{{ tier }}.cancer.gov/{% endif %}" \ No newline at end of file diff --git a/ansible/roles/bento-demo/.travis.yml b/ansible/roles/bento-demo/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-demo/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible 
+ + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-demo/README.md b/ansible/roles/bento-demo/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-demo/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento-demo/defaults/main.yml b/ansible/roles/bento-demo/defaults/main.yml new file mode 100644 index 000000000..9085d3dfc --- /dev/null +++ b/ansible/roles/bento-demo/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +platform: aws \ No newline at end of file diff --git a/ansible/roles/bento-demo/handlers/main.yml b/ansible/roles/bento-demo/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-demo/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-demo/meta/main.yml b/ansible/roles/bento-demo/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-demo/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/bento-demo/tasks/build.yml b/ansible/roles/bento-demo/tasks/build.yml new file mode 100644 index 000000000..4e267fac4 --- /dev/null +++ b/ansible/roles/bento-demo/tasks/build.yml @@ -0,0 +1,83 @@ +--- +- name: set demo environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_DEMO_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "demo" + +- name: copy environment file to {{workspace}}/bento-frontend + template: + src: env.j2 + dest: "{{workspace}}/src/main/frontend/.env" + +- name: run npm install in {{workspace}}/src/main/frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/" + loop: + - npm install + - npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + +- name: run npm install and build in {{workspace}}/src/main/frontend/node_modules/mui-custom-datatables + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/node_modules/mui-custom-datatables" + loop: + - npm install + - npm run build + +- name: run npm build in frontend + command: npm run-script build + args: + chdir: "{{workspace}}/src/main/frontend" + +- name: copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy ICDC-0.0.1.war to ROOT.war + copy: + remote_src: yes + src: 
"{{workspace}}/target/ICDC-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/icdc-demo image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/icdc-demo + tag: "{{build_number}}" + push: yes + source: build + + +- name: post schemas + uri: + url: http://{{neo4j_ip}}:7474/graphql/idl/ + method: POST + body: "{{ lookup('file','{{workspace}}/src/main/resources/graphql/icdc.graphql') }}" + headers: + Accept: "application/json" + Authorization: "{{bearer}}" + register: schema + +- name: schema output + debug: + msg: "{{schema}}" + + \ No newline at end of file diff --git a/ansible/roles/bento-demo/tasks/deploy.yml b/ansible/roles/bento-demo/tasks/deploy.yml new file mode 100644 index 000000000..66e3c1c4e --- /dev/null +++ b/ansible/roles/bento-demo/tasks/deploy.yml @@ -0,0 +1,70 @@ +--- +- name: create task definition + ecs_taskdefinition: + containers: + - name: demo + essential: true + image: "cbiitssrepo/icdc-demo:{{build_number}}" + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + portMappings: + - containerPort: "8080" + hostPort: "8080" + # logConfiguration: + # logDriver: sumologic + # options: + # sumo-url: "{{sumo_collector_url}}" + # sumo-source-category: "{{tier}}/{{platform}}/bento/api/logs" + # sumo-source-name: "bento-{{platform}}-api-docker-{{tier}}" + network_mode: bridge + family: bento-{{tier}} + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + + +- name: query task definition + ecs_taskdefinition_info: + task_definition: bento-{{tier}} + 
region: "{{region}}" + register: task_demo + +- name: query demo service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}} + details: true + region: "{{region}}" + register: service_demo + +- name: set facts + set_fact: + demo_revision: "{{task_demo.revision}}" + task_demo_name: "{{task_demo.family}}" + lb_demo: "{{service_demo.services[0].loadBalancers}}" + role_arn: "{{service_demo.services[0].roleArn}}" + +- name: update demo service + ecs_service: + state: present + name: bento-{{tier}} + cluster: bento-{{tier}} + task_definition: "{{task_demo_name}}:{{demo_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 2 + load_balancers: "{{ lb_demo }}" + region: "{{region}}" + diff --git a/ansible/roles/bento-demo/tasks/main.yml b/ansible/roles/bento-demo/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-demo/templates/application.properties.j2 b/ansible/roles/bento-demo/templates/application.properties.j2 new file mode 100644 index 000000000..d75fa1487 --- /dev/null +++ b/ansible/roles/bento-demo/templates/application.properties.j2 @@ -0,0 +1,26 @@ +spring.mvc.throw-exception-if-no-handler-found=true +spring.data.neo4j.username={{neo4j_user}} +spring.data.neo4j.password={{neo4j_ip}} +neo4j.jdbc.server=jdbc:neo4j:bolt://{{neo4j_ip}} +graphql.schema=graphql/person.graphqls, graphql/icdc.graphqls +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.java.driver.server=bolt://{{neo4j_ip}}:7687 +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +api.version=v1 +session.timeout=30 +data.model.version = 1 +allow_grapqh_query = true +allow_graphql_mutation =false +fence.client_id={{fence_id}} +fence.client_credential={{fence_credential}} +fence.redirect_url=https://{{fence_url}}/ 
+fence.url=https://nci-crdc-staging.datacommons.io/ +fence.exchange_token_url=https://nci-crdc-staging.datacommons.io/user/oauth2/token +fence.log_out_url = https://nci-crdc-staging.datacommons.io/user/logout +neo4j_query.getversion= query { numberOfStudies }; +graphql_api.version = 1.0.0 +rest_api.version =1.0.0 +front_end.version =1.0.0 \ No newline at end of file diff --git a/ansible/roles/bento-demo/templates/env.j2 b/ansible/roles/bento-demo/templates/env.j2 new file mode 100644 index 000000000..7db771b8c --- /dev/null +++ b/ansible/roles/bento-demo/templates/env.j2 @@ -0,0 +1,6 @@ +REACT_APP_BACKEND_GETUSERINFO_API=https://{{demo_url}}/fence/login/ +REACT_APP_LOGIN_URL=https://nci-crdc-staging.datacommons.io/user/oauth2/authorize?client_id=82pslYFJqA7auRvKYfTOK67jzQAMb8f6C33tlmZz&response_type=code&redirect_uri=https%3A%2F%2Fcaninecommons-demo.cancer.gov%2F&scope=openid%20user +REACT_APP_USER_LOGOUT_URL=https://{{demo_url}}/fence/logout +REACT_APP_BACKEND_API=https://{{demo_url}}/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/icdc-codebase/master/src/main/frontend/src/content/dev/aboutPagesContent.yaml \ No newline at end of file diff --git a/ansible/roles/bento-demo/tests/inventory b/ansible/roles/bento-demo/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-demo/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-demo/tests/test.yml b/ansible/roles/bento-demo/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-demo/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-demo/vars/main.yml b/ansible/roles/bento-demo/vars/main.yml new file mode 100644 index 000000000..d8f254168 --- /dev/null +++ b/ansible/roles/bento-demo/vars/main.yml @@ -0,0 +1,19 @@ +--- 
+# vars file for cicd +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_collector_url: "{{ lookup('aws_ssm', 'sumo_collector_url', region='us-east-1' ) }}" +app_name: bento-{{platform}}-{{tier}} +neo4j_password: "{{ lookup('env','NEO4J_PASSWORD') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +fence_id: "{{ lookup('env','FENCE_ID') }}" +fence_credential: "{{ lookup('env','FENCE_CREDENTIAL') }}" +fence_url: "{{ lookup('env','FENCE_URL') }}" +demo_url: "{{lookup('env','DEMO_URL')}}" +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/bento-docs/.travis.yml b/ansible/roles/bento-docs/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-docs/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-docs/README.md b/ansible/roles/bento-docs/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-docs/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. 
+ +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento-docs/defaults/main.yml b/ansible/roles/bento-docs/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/bento-docs/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/bento-docs/handlers/main.yml b/ansible/roles/bento-docs/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-docs/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-docs/meta/main.yml b/ansible/roles/bento-docs/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-docs/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/bento-docs/tasks/build.yml b/ansible/roles/bento-docs/tasks/build.yml new file mode 100644 index 000000000..a1904ac5a --- /dev/null +++ b/ansible/roles/bento-docs/tasks/build.yml @@ -0,0 +1,60 @@ +--- +#- name: get python version +# shell: "python3 --version" +# args: +# chdir: "{{ workspace }}" +# register: python_ver + +#- name: echo python version +# debug: +# msg: "{{ python_ver.stdout_lines }}" + +- name: clone docs repo + shell: git clone https://github.com/CBIIT/bento-docs.git + args: + chdir: "{{ workspace }}" + +- name: get repo commit ID + shell: git rev-parse HEAD + args: + chdir: "{{ workspace }}/bento-docs" + register: repo_commit + +- name: echo repo commit + debug: + msg: "{{ repo_commit.stdout_lines }}" + +- name: get additional branches + shell: git branch -r | grep -v '\->' | while read remote; do git branch --track "${remote#origin/}" "$remote"; done + args: + chdir: "{{ workspace }}/bento-docs" + +- name: pull all branches + shell: git pull + args: + chdir: "{{ workspace }}/bento-docs" + +- name: get python requirements + shell: "pip3 install -r requirements.txt" + args: + chdir: "{{ workspace }}/bento-docs" + +- name: build latest docs + shell: "sphinx-multiversion source docs" + args: + chdir: "{{ workspace }}/bento-docs" + register: sphinx_out + +- name: echo sphinx output + debug: + msg: "{{ sphinx_out.stdout_lines }}" + +- name: show changes in git status + shell: "git status -s" + args: + chdir: "{{ workspace }}/bento-docs/docs" + register: git_status + +- 
name: echo git status + debug: + msg: "{{ git_status.stdout_lines }}" \ No newline at end of file diff --git a/ansible/roles/bento-docs/tests/inventory b/ansible/roles/bento-docs/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-docs/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-docs/tests/test.yml b/ansible/roles/bento-docs/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-docs/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-docs/vars/main.yml b/ansible/roles/bento-docs/vars/main.yml new file mode 100644 index 000000000..97e7fe0c0 --- /dev/null +++ b/ansible/roles/bento-docs/vars/main.yml @@ -0,0 +1,3 @@ +--- +# vars file for cicd +workspace: "{{ lookup('env','WORKSPACE') }}" diff --git a/ansible/roles/bento-file-downloader/.travis.yml b/ansible/roles/bento-file-downloader/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-file-downloader/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-file-downloader/README.md b/ansible/roles/bento-file-downloader/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-file-downloader/README.md @@ -0,0 +1,38 @@ 
+Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento-file-downloader/defaults/main.yml b/ansible/roles/bento-file-downloader/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/bento-file-downloader/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/bento-file-downloader/files/inject.template.js b/ansible/roles/bento-file-downloader/files/inject.template.js new file mode 100644 index 000000000..9c7915530 --- /dev/null +++ b/ansible/roles/bento-file-downloader/files/inject.template.js @@ -0,0 +1,11 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', +}; diff --git a/ansible/roles/bento-file-downloader/files/nginx-entrypoint.sh b/ansible/roles/bento-file-downloader/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/bento-file-downloader/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/bento-file-downloader/handlers/main.yml b/ansible/roles/bento-file-downloader/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-file-downloader/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at 
end of file diff --git a/ansible/roles/bento-file-downloader/meta/main.yml b/ansible/roles/bento-file-downloader/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-file-downloader/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/bento-file-downloader/tasks/build.yml b/ansible/roles/bento-file-downloader/tasks/build.yml new file mode 100644 index 000000000..952ce3b36 --- /dev/null +++ b/ansible/roles/bento-file-downloader/tasks/build.yml @@ -0,0 +1,30 @@ +--- +################################################################################################# + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/{{project}}-filedownloader image + docker_image: + build: + path: "{{workspace}}/bento-files" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/filedownload-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{project}}-filedownloader + tag: "{{downloader_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{project}}-filedownloader image + docker_image: + name: "cbiitssrepo/{{project}}-filedownloader:{{downloader_version}}-{{build_number}}" + repository: cbiitssrepo/{{project}}-filedownloader:latest + force_tag: yes + push: yes + source: local + + \ No newline at end of file diff --git a/ansible/roles/bento-file-downloader/tasks/deploy.yml b/ansible/roles/bento-file-downloader/tasks/deploy.yml new file mode 100644 index 000000000..23f3cd6a2 --- /dev/null +++ b/ansible/roles/bento-file-downloader/tasks/deploy.yml @@ -0,0 +1,124 @@ +--- +############################################################################################################################ + +# Task Definitions + +############################################################################################################################ + +- name: get distribution domain name + command: aws cloudfront list-distributions --query "DistributionList.Items[?Origins.Items[?Id=='{{cloudfront_origin_id}}']].DomainName |[0]" --output text + register: dist_info + +- name: retrieve key group id + command: 
aws cloudfront list-public-keys --query "PublicKeyList.Items[?Name=='{{cloudfront_key_group_name}}'].Id |[0]" --output text --region us-east-1 + register: key_group + +- name: set facts distribution DomainName and key_group id + set_fact: + cloudfront_domain_name: "{{dist_info.stdout_lines | map('trim') | list |first}}" + cloudfront_key_group_id: "{{key_group.stdout_lines | map('trim') | list |first}}" + +- name: show cloudfront_key_group_name + debug: + msg: "{{cloudfront_key_group_id}}" + +- name: create task definition - {{project}} filedownloader + ecs_taskdefinition: + containers: + - name: downloader + essential: true + image: "cbiitssrepo/{{project}}-filedownloader:{{downloader_version}}" + environment: + - name: VERSION + value: "{{downloader_version}}" + - name: DATE + value: "{{ansible_date_time.date}}" + - name: PROJECT + value: "{{project}}" + - name: CF_URL + value: https://{{cloudfront_domain_name}} + - name: CF_PRIVATE_KEY + value: "{{cloudfront_private_key}}" + - name: CF_KEY_PAIR_ID + value: "{{cloudfront_key_group_id}}" + - name: URL_SRC + value: "{{url_src}}" + - name: BACKEND_URL + value: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/v1/graphql/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/v1/graphql/{% endif %}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{stack_name}}-{{tier}}-file-downloader-{{ansible_hostname}}" + portMappings: + - containerPort: "8081" + hostPort: "8081" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-file-downloader" + syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{project}}-{{tier}}-file-downloader" + state: present + memory: '128' + cpu: '128' + region: "{{region}}" + register: task_output + +############################################################################################################################ + +# Task Definition 
Queries + +############################################################################################################################ + +- name: query task definition - bento filedownloader + ecs_taskdefinition_info: + task_definition: "{{project}}-{{tier}}-file-downloader" + region: "{{region}}" + register: task_downloader + +############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query filedownloader service + ecs_service_info: + cluster: "{{project}}-{{tier}}" + service: "{{project}}-{{tier}}-file-downloader" + details: true + region: "{{region}}" + register: service_downloader + +############################################################################################################################ + +- name: set facts + set_fact: + downloader_revision: "{{task_downloader.revision}}" + task_downloader_name: "{{task_downloader.family}}" + lb_downloader: "{{service_downloader.services[0].loadBalancers}}" + role_arn: "{{service_downloader.services[0].roleArn}}" + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update filedownloader service + ecs_service: + state: present + name: "{{project}}-{{tier}}-file-downloader" + cluster: "{{project}}-{{tier}}" + task_definition: "{{task_downloader_name}}:{{downloader_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_downloader}}" + region: "{{region}}" + register: service_downloader_output diff --git 
a/ansible/roles/bento-file-downloader/tasks/main.yml b/ansible/roles/bento-file-downloader/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-file-downloader/templates/env.j2 b/ansible/roles/bento-file-downloader/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/bento-file-downloader/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/bento-file-downloader/templates/nginx-config.yml.j2 b/ansible/roles/bento-file-downloader/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/bento-file-downloader/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/bento-file-downloader/tests/inventory b/ansible/roles/bento-file-downloader/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-file-downloader/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-file-downloader/tests/test.yml b/ansible/roles/bento-file-downloader/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-file-downloader/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file 
diff --git a/ansible/roles/bento-file-downloader/vars/main.yml b/ansible/roles/bento-file-downloader/vars/main.yml new file mode 100644 index 000000000..932dc0a25 --- /dev/null +++ b/ansible/roles/bento-file-downloader/vars/main.yml @@ -0,0 +1,21 @@ +--- +# vars file for cicd +url_src: CLOUD_FRONT +stack_name: "{{project}}" +platform: aws +project: "{{project}}" +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: "{{project}}-{{platform}}-{{tier}}" +downloader_version: "{{ lookup('env','DOWNLOADER_VERSION') }}" +cloudfront_origin_id: "{{project}}_files_origin_id" +cloudfront_key_group_name: "{{project}}-{{tier}}-pub-key" +cloudfront_private_key: "{{ lookup('aws_ssm', 'bento_files_cloudfront_private_key', region='us-east-1' ) }}" diff --git a/ansible/roles/bento-gdc/.travis.yml b/ansible/roles/bento-gdc/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-gdc/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + 
webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-gdc/README.md b/ansible/roles/bento-gdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-gdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento-gdc/defaults/main.yml b/ansible/roles/bento-gdc/defaults/main.yml new file mode 100644 index 000000000..691ff5448 --- /dev/null +++ b/ansible/roles/bento-gdc/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for cicd +platform: aws +redis_host: + gdc: "bento-gdc-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" +redis_password: "" +redis_port: 6379 \ No newline at end of file diff --git a/ansible/roles/bento-gdc/handlers/main.yml b/ansible/roles/bento-gdc/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-gdc/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-gdc/meta/main.yml b/ansible/roles/bento-gdc/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-gdc/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/bento-gdc/tasks/build.yml b/ansible/roles/bento-gdc/tasks/build.yml new file mode 100644 index 000000000..7a453e6b0 --- /dev/null +++ b/ansible/roles/bento-gdc/tasks/build.yml @@ -0,0 +1,109 @@ +--- +- name: set dev environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_GDC_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "gdc" + +- name: set qa environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_STAGE_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "stage" + +- name: set prod environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_PROD_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{workspace}}/src/main/resources/application.properties.j2" + dest: 
"{{workspace}}/src/main/resources/application.properties" + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/bento-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "./dockerfiles/backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/bento-backend + tag: "{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/bento-backend image + docker_image: + name: "cbiitssrepo/bento-backend:{{build_number}}" + repository: cbiitssrepo/bento-backend:latest + force_tag: yes + push: yes + source: local + +- name: copy environment file to {{workspace}}/bento-frontend + template: + src: env.j2 + dest: "{{workspace}}/bento-frontend/.env" + +- name: run npm install in {{workspace}}/bento-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/bento-frontend" + loop: + - npm install npm@latest -g + - npm install + - npm run lint-fix + - npm run build + +- name: build cbiitssrepo/bento-frontend image + docker_image: + build: + path: "{{workspace}}/bento-frontend" + dockerfile: "../dockerfiles/frontend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/bento-frontend + tag: "{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/bento-frontend image + docker_image: + name: "cbiitssrepo/bento-frontend:{{build_number}}" + repository: cbiitssrepo/bento-frontend:latest + force_tag: yes + push: yes + source: local + + + \ No newline at end of file diff --git a/ansible/roles/bento-gdc/tasks/deploy-backend.yml b/ansible/roles/bento-gdc/tasks/deploy-backend.yml new file mode 100644 index 
000000000..f818c4bd2 --- /dev/null +++ b/ansible/roles/bento-gdc/tasks/deploy-backend.yml @@ -0,0 +1,67 @@ +--- +- name: create task definition + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/bento-backend:{{build_number}}" + portMappings: + - containerPort: "8080" + # hostPort: "80" + - name: frontend + essential: true + image: "cbiitssrepo/bento-frontend:{{build_number}}" + portMappings: + - containerPort: "80" + hostPort: "80" + network_mode: bridge + family: ctdc + state: present + region: "{{region}}" + register: task_output + +- name: query task definition + ecs_taskdefinition_info: + task_definition: ctdc + region: "{{region}}" + register: task_output + +- name: query ecs service + ecs_service_info: + cluster: ctdc-ecs + service: ctdc_ecs_service + details: true + region: "{{region}}" + register: service_info + +- name: set facts + set_fact: + revision: "{{task_output.revision}}" + task_name: "{{task_output.family}}" + lb_target_arn: "{{service_info.services[0].loadBalancers[0].targetGroupArn}}" + lb_container_port: "{{service_info.services[0].loadBalancers[0].containerPort}}" + lb_container_name: "{{service_info.services[0].loadBalancers[0].containerName}}" + role_arn: "{{service_info.services[0].roleArn}}" + +- name: update ecs service + ecs_service: + state: present + name: ctdc_ecs_service + cluster: ctdc-ecs + task_definition: "{{task_name}}:{{revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: + - targetGroupArn: "{{lb_target_arn}}" + containerName: "{{lb_container_name}}" + containerPort: "{{ lb_container_port }}" + region: "{{region}}" + register: service_output + +- name: show service service + debug: + msg: "{{service_output}}" \ No newline at end of file diff --git a/ansible/roles/bento-gdc/tasks/deploy-dev.yml b/ansible/roles/bento-gdc/tasks/deploy-dev.yml new file mode 
100644 index 000000000..7f0ccdb04 --- /dev/null +++ b/ansible/roles/bento-gdc/tasks/deploy-dev.yml @@ -0,0 +1,359 @@ +--- +############################################################################################################################ + +# Task Definitions + +############################################################################################################################ + +- name: create task definition - backend + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/bento-backend:{{build_number}}" + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-backend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" +# - name: NEW_RELIC_LOG_FILE_NAME +# value: "STDOUT" +# - name: JAVA_OPTS +# value: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" +# entryPoint: +# - "/bin/ash" +# - "-c" +# - 'wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip && bin/catalina.sh run' + portMappings: + - containerPort: "8080" + hostPort: "8080" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + syslog-format: rfc5424 + tag: "{{ app_name }}-backend" + network_mode: bridge + family: bento-{{tier}}-backend + memory: '512' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + +- name: create task definition - frontend + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "cbiitssrepo/bento-frontend:{{build_number}}" + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-frontend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: 
"gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + portMappings: + - containerPort: "80" + hostPort: "80" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + syslog-format: rfc5424 + tag: "{{ app_name }}-frontend" + network_mode: bridge + family: bento-{{tier}}-frontend + state: present + memory: '512' + cpu: '512' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo syslog + ecs_taskdefinition: + containers: + - name: sumologic-syslog + essential: true + image: "sumologic/collector:latest-syslog" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-syslog" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + portMappings: + - containerPort: "514" + hostPort: "514" + network_mode: bridge + family: bento-{{tier}}-sumo_syslog + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo docker + ecs_taskdefinition: + containers: + - name: sumologic-docker + essential: true + image: "sumologic/collector:latest" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-docker" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + network_mode: bridge + family: bento-{{tier}}-sumo_docker + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - newrelic docker + ecs_taskdefinition: + containers: + - name: 
newrelic-docker + essential: true + image: "newrelic/infrastructure:latest" + environment: + - name: NRIA_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NRIA_DISPLAY_NAME + value: "{{ app_name }}-docker" + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + - containerPath: /host + sourceVolume: docker-host + readOnly: true +# cap_add: +# - SYS_PTRACE + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + - name: docker-host + host: + sourcePath: / + network_mode: bridge + family: bento-{{tier}}-nr_docker + state: present + memory: '128' + cpu: '128' + region: "{{region}}" + register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - bento frontend + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-frontend + region: "{{region}}" + register: task_frontend + +- name: query task definition - bento backend + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-backend + region: "{{region}}" + register: task_backend + +- name: query task definition - sumologic syslog + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-sumo_syslog + region: "{{region}}" + register: task_sumo_syslog + +- name: query task definition - sumologic docker + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-sumo_docker + region: "{{region}}" + register: task_sumo_docker + +- name: query task definition - newrelic docker + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-nr_docker + region: "{{region}}" + register: task_nr_docker + 
+############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-backend + details: true + region: "{{region}}" + register: service_backend + +- name: query ecs service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-frontend + details: true + region: "{{region}}" + register: service_frontend + +- name: query sumologic syslog service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-sumo_syslog + details: true + region: "{{region}}" + register: service_sumo_syslog + +- name: query sumologic docker service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-sumo_docker + details: true + region: "{{region}}" + register: service_sumo_docker + +- name: query newrelic docker service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-nr_docker + details: true + region: "{{region}}" + register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + task_sumo_docker_name: "{{task_sumo_docker.family}}" + task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + 
+############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update sumologic syslog service + ecs_service: + state: present + name: bento-{{tier}}-sumo_syslog + cluster: bento-{{tier}} + task_definition: "{{task_sumo_syslog_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_syslog_output + +- name: update sumologic docker service + ecs_service: + state: present + name: bento-{{tier}}-sumo_docker + cluster: bento-{{tier}} + task_definition: "{{task_sumo_docker_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_docker_output + +- name: update newrelic docker service + ecs_service: + state: present + name: bento-{{tier}}-nr_docker + cluster: bento-{{tier}} + task_definition: "{{task_nr_docker_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_nr_docker_output + +- name: update backend service + ecs_service: + state: present + name: bento-{{tier}}-backend + cluster: bento-{{tier}} + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output + +- name: update frontend service + ecs_service: + state: present + name: bento-{{tier}}-frontend + cluster: bento-{{tier}} + task_definition: 
"{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +#- name: show lb_backend +# debug: +# msg: "{{lb_backend}}" diff --git a/ansible/roles/bento-gdc/tasks/deploy.yml b/ansible/roles/bento-gdc/tasks/deploy.yml new file mode 100644 index 000000000..34c845522 --- /dev/null +++ b/ansible/roles/bento-gdc/tasks/deploy.yml @@ -0,0 +1,357 @@ +--- +############################################################################################################################ + +# Task Definitions + +############################################################################################################################ + +- name: create task definition - backend + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/bento-backend:{{build_number}}" + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-backend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_LOG_FILE_NAME + value: "STDOUT" + - name: JAVA_OPTS + value: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + entryPoint: + - "/bin/ash" + - "-c" + - 'wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip && bin/catalina.sh run' + portMappings: + - containerPort: "8080" + hostPort: "8080" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-backend" + syslog-format: "rfc5424micro" + network_mode: bridge + family: bento-{{tier}}-backend + memory: '512' + cpu: '512' + state: present + region: "{{region}}" 
+ register: task_output + +- name: create task definition - frontend + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "cbiitssrepo/bento-frontend:{{build_number}}" + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-frontend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + portMappings: + - containerPort: "80" + hostPort: "80" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-frontend" + syslog-format: "rfc5424micro" + network_mode: bridge + family: bento-{{tier}}-frontend + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo syslog + ecs_taskdefinition: + containers: + - name: sumologic-syslog + essential: true + image: "sumologic/collector:latest-syslog" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-syslog" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + portMappings: + - containerPort: "514" + hostPort: "514" + network_mode: bridge + family: bento-{{tier}}-sumo_syslog + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo docker + ecs_taskdefinition: + containers: + - name: sumologic-docker + essential: true + image: "sumologic/collector:latest" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-docker" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + 
value: "true" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + network_mode: bridge + family: bento-{{tier}}-sumo_docker + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - newrelic docker + ecs_taskdefinition: + containers: + - name: newrelic-docker + essential: true + image: "newrelic/infrastructure:latest" + environment: + - name: NRIA_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NRIA_DISPLAY_NAME + value: "{{ app_name }}-docker" + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + - containerPath: /host + sourceVolume: docker-host + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + - name: docker-host + host: + sourcePath: / + network_mode: bridge + family: bento-{{tier}}-nr_docker + state: present + memory: '128' + cpu: '128' + region: "{{region}}" + register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - bento frontend + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-frontend + region: "{{region}}" + register: task_frontend + +- name: query task definition - bento backend + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-backend + region: "{{region}}" + register: task_backend + +- name: query task definition - sumologic syslog + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-sumo_syslog + region: "{{region}}" + register: task_sumo_syslog + +- name: query task definition - 
sumologic docker + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-sumo_docker + region: "{{region}}" + register: task_sumo_docker + +- name: query task definition - newrelic docker + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-nr_docker + region: "{{region}}" + register: task_nr_docker + +############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-backend + details: true + region: "{{region}}" + register: service_backend + +- name: query ecs service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-frontend + details: true + region: "{{region}}" + register: service_frontend + +- name: query sumologic syslog service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-sumo_syslog + details: true + region: "{{region}}" + register: service_sumo_syslog + +- name: query sumologic docker service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-sumo_docker + details: true + region: "{{region}}" + register: service_sumo_docker + +- name: query newrelic docker service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-nr_docker + details: true + region: "{{region}}" + register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + task_sumo_docker_name: "{{task_sumo_docker.family}}" 
+ task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update sumologic syslog service + ecs_service: + state: present + name: bento-{{tier}}-sumo_syslog + cluster: bento-{{tier}} + task_definition: "{{task_sumo_syslog_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_syslog_output + +- name: update sumologic docker service + ecs_service: + state: present + name: bento-{{tier}}-sumo_docker + cluster: bento-{{tier}} + task_definition: "{{task_sumo_docker_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_docker_output + +- name: update newrelic docker service + ecs_service: + state: present + name: bento-{{tier}}-nr_docker + cluster: bento-{{tier}} + task_definition: "{{task_nr_docker_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_nr_docker_output + +- name: update backend service + ecs_service: + state: present + name: bento-{{tier}}-backend + cluster: bento-{{tier}} + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + 
 region: "{{region}}" + register: service_backend_output + +- name: update frontend service + ecs_service: + state: present + name: bento-{{tier}}-frontend + cluster: bento-{{tier}} + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +- name: show lb_backend + debug: + msg: "{{lb_backend}}" diff --git a/ansible/roles/bento-gdc/tasks/main.yml b/ansible/roles/bento-gdc/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-gdc/templates/application.properties.j2 b/ansible/roles/bento-gdc/templates/application.properties.j2 new file mode 100644 index 000000000..1451bc89b --- /dev/null +++ b/ansible/roles/bento-gdc/templates/application.properties.j2 @@ -0,0 +1,14 @@ +spring.mvc.throw-exception-if-no-handler-found=true +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.graphql.endpoint.schema_endpoint=idl/ +graphql.schema=graphql/{{schema_file}} +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +allow_grapqh_query = true +allow_graphql_mutation =false +redis.host={{redis_host}} +redis.password={{redis_password}} +redis.port={{redis_port}} +redis.ttl=86400 \ No newline at end of file diff --git a/ansible/roles/bento-gdc/templates/env.j2 b/ansible/roles/bento-gdc/templates/env.j2 new file mode 100644 index 000000000..c61c78306 --- /dev/null +++ b/ansible/roles/bento-gdc/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml
+{% else %} +REACT_APP_BACKEND_API=https://api-demo.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/bento-gdc/tests/inventory b/ansible/roles/bento-gdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-gdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-gdc/tests/test.yml b/ansible/roles/bento-gdc/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-gdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-gdc/vars/main.yml b/ansible/roles/bento-gdc/vars/main.yml new file mode 100644 index 000000000..9c600dafb --- /dev/null +++ b/ansible/roles/bento-gdc/vars/main.yml @@ -0,0 +1,16 @@ +--- +# vars file for cicd +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: bento-{{platform}}-{{tier}} +schema_file: bento-extended.graphql +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/bento-gke/.travis.yml b/ansible/roles/bento-gke/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- 
/dev/null +++ b/ansible/roles/bento-gke/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-gke/README.md b/ansible/roles/bento-gke/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-gke/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/bento-gke/defaults/main.yml b/ansible/roles/bento-gke/defaults/main.yml new file mode 100644 index 000000000..9085d3dfc --- /dev/null +++ b/ansible/roles/bento-gke/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +platform: aws \ No newline at end of file diff --git a/ansible/roles/bento-gke/handlers/main.yml b/ansible/roles/bento-gke/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-gke/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-gke/meta/main.yml b/ansible/roles/bento-gke/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-gke/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/bento-gke/tasks/build.yml b/ansible/roles/bento-gke/tasks/build.yml new file mode 100644 index 000000000..7c1c76199 --- /dev/null +++ b/ansible/roles/bento-gke/tasks/build.yml @@ -0,0 +1,120 @@ +--- +- name: set dev environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "dev" + +- name: set qa environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_STAGE_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "stage" + +- name: set prod environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_PROD_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + 
src: application.properties.j2 + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: copy deployment and services manufests + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + loop: + - { src: 'backend.yml.j2', dest: '{{workspace}}/backend.yml'} + - { src: 'frontend.yml.j2', dest: '{{workspace}}/frontend.yml'} + - { src: 'bento-backend.yml.j2', dest: '{{workspace}}/bento-backend.yml'} + - { src: 'bento-frontend.yml.j2', dest: '{{workspace}}/bento-frontend.yml'} + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/bento-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/dockerfiles/backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/bento-backend + tag: "{{build_number}}" + push: yes + source: build + +- name: copy environment file to {{workspace}}/bento-frontend + template: + src: env.j2 + dest: "{{workspace}}/bento-frontend/.env" + +- name: run npm install in {{workspace}}/bento-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/bento-frontend" + loop: + - npm install + +- name: run npm build in frontend + command: npm run-script build + args: + chdir: "{{workspace}}/bento-frontend" + +- name: build cbiitssrepo/bento-frontend image + docker_image: + build: + path: "{{workspace}}/bento-frontend" + dockerfile: "{{workspace}}/dockerfiles/frontend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/bento-frontend + tag: "{{build_number}}" + push: yes + source: build + +# - name: post schemas +# uri: +# url: http://{{neo4j_ip}}:7474/graphql/idl/ +# method: POST +# body: "{{ 
lookup('file','{{workspace}}/src/main/resources/graphql/bento-extended.graphql') }}" +# headers: +# Accept: "application/json" +# Authorization: "{{bearer}}" +# register: schema + +# - name: post schema +# command: "curl -X POST http://{{neo4j_ip}}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: {{bearer}}' -d @{{workspace}}/bento-backend/src/main/resources/graphql/ctdc.graphql" +# register: schema + +# - name: schema output +# debug: +# msg: "{{schema}}" + + \ No newline at end of file diff --git a/ansible/roles/bento-gke/tasks/deploy.yml b/ansible/roles/bento-gke/tasks/deploy.yml new file mode 100644 index 000000000..5e341802b --- /dev/null +++ b/ansible/roles/bento-gke/tasks/deploy.yml @@ -0,0 +1,44 @@ +--- + +- name: create a {{tier}} namespace + k8s: + name: "{{tier}}" + api_version: v1 + kind: Namespace + state: present + +- name: deploy frontend and backend bento applications + k8s: + state: present + definition: "{{ lookup('template', '{{item}}') | from_yaml }}" + validate_certs: no + namespace: "{{tier}}" + force: yes + loop: + - bento-backend.yml.j2 + - bento-frontend.yml.j2 + +- name: deploy frontend and backend services + k8s: + state: present + definition: "{{ lookup('template', '{{item}}') | from_yaml }}" + validate_certs: no + namespace: "{{tier}}" + apply: yes + loop: + - backend.yml.j2 + - frontend.yml.j2 + +- name: deploy ingress and certificate + k8s: + state: present + definition: "{{ lookup('template', '{{item}}') | from_yaml }}" + validate_certs: no + namespace: "{{tier}}" + apply: yes + loop: + - ingress.yml.j2 + - managedcert.yml.j2 + + + diff --git a/ansible/roles/bento-gke/tasks/main.yml b/ansible/roles/bento-gke/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-gke/templates/application.properties.j2 b/ansible/roles/bento-gke/templates/application.properties.j2 new file mode 100644 index 000000000..cda065c47 --- /dev/null +++ 
b/ansible/roles/bento-gke/templates/application.properties.j2 @@ -0,0 +1,8 @@ +spring.mvc.throw-exception-if-no-handler-found=true +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +allow_grapqh_query = true +allow_graphql_mutation =false \ No newline at end of file diff --git a/ansible/roles/bento-gke/templates/backend.yml.j2 b/ansible/roles/bento-gke/templates/backend.yml.j2 new file mode 100644 index 000000000..c88a19a4c --- /dev/null +++ b/ansible/roles/bento-gke/templates/backend.yml.j2 @@ -0,0 +1,15 @@ +kind: Service +apiVersion: v1 +metadata: + name: bento-backend-service-{{tier}} +spec: + type: NodePort + selector: + app: bento + role: backend + env: {{tier}} + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + name: http \ No newline at end of file diff --git a/ansible/roles/bento-gke/templates/bento-backend.yml.j2 b/ansible/roles/bento-gke/templates/bento-backend.yml.j2 new file mode 100644 index 000000000..ec17b6c43 --- /dev/null +++ b/ansible/roles/bento-gke/templates/bento-backend.yml.j2 @@ -0,0 +1,36 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: bento-backend-pod-{{tier}} + namespace: {{tier}} +spec: + replicas: 1 + selector: + matchLabels: + app: bento + role: backend + env: {{tier}} + template: + metadata: + name: bento-backend-{{tier}} + labels: + app: bento + role: backend + env: {{tier}} + spec: + containers: + - name: backend + image: cbiitssrepo/bento-backend:{{build_number}} + resources: + limits: + memory: "500Mi" + cpu: "100m" + imagePullPolicy: Always + readinessProbe: + httpGet: + path: /ping + port: 8080 + ports: + - name: backend + containerPort: 8080 + protocol: TCP \ No newline at end of file diff --git a/ansible/roles/bento-gke/templates/bento-frontend.yml.j2 b/ansible/roles/bento-gke/templates/bento-frontend.yml.j2 new file mode 100644 index 000000000..7479020bc 
--- /dev/null +++ b/ansible/roles/bento-gke/templates/bento-frontend.yml.j2 @@ -0,0 +1,35 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: bento-frontend-{{tier}} + namespace: {{tier}} +spec: + replicas: 1 + selector: + matchLabels: + app: bento + role: frontend + env: {{tier}} + template: + metadata: + name: bento-frontend-{{tier}} + labels: + app: bento + role: frontend + env: {{tier}} + spec: + containers: + - name: frontend + image: cbiitssrepo/bento-frontend:{{build_number}} + resources: + limits: + memory: "500Mi" + cpu: "100m" + imagePullPolicy: Always + readinessProbe: + httpGet: + path: / + port: 80 + ports: + - name: frontend + containerPort: 80 \ No newline at end of file diff --git a/ansible/roles/bento-gke/templates/env.j2 b/ansible/roles/bento-gke/templates/env.j2 new file mode 100644 index 000000000..f6236dfc4 --- /dev/null +++ b/ansible/roles/bento-gke/templates/env.j2 @@ -0,0 +1,3 @@ +REACT_APP_BACKEND_API=https://gke-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml diff --git a/ansible/roles/bento-gke/templates/frontend.yml.j2 b/ansible/roles/bento-gke/templates/frontend.yml.j2 new file mode 100644 index 000000000..d3688ed67 --- /dev/null +++ b/ansible/roles/bento-gke/templates/frontend.yml.j2 @@ -0,0 +1,15 @@ +kind: Service +apiVersion: v1 +metadata: + name: bento-frontend-service-{{tier}} +spec: + type: NodePort + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + selector: + app: bento + role: frontend + env: {{tier}} \ No newline at end of file diff --git a/ansible/roles/bento-gke/templates/ingress.yml.j2 b/ansible/roles/bento-gke/templates/ingress.yml.j2 new file mode 100644 index 000000000..882fed76e --- /dev/null +++ b/ansible/roles/bento-gke/templates/ingress.yml.j2 @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + 
name: bento-ingress-{{tier}} + annotations: + kubernetes.io/ingress.global-static-ip-name: "{{tier}}-web-frontend-ip" + networking.gke.io/managed-certificates: bento-certificate-{{tier}} + kubernetes.io/ingress.allow-http: "false" +spec: + rules: + - http: + paths: + - path: /* + backend: + serviceName: bento-frontend-service-{{tier}} + servicePort: 80 + - path: /v1/graphql/* + backend: + serviceName: bento-backend-service-{{tier}} + servicePort: 8080 \ No newline at end of file diff --git a/ansible/roles/bento-gke/templates/managedcert.yml.j2 b/ansible/roles/bento-gke/templates/managedcert.yml.j2 new file mode 100644 index 000000000..6c8030121 --- /dev/null +++ b/ansible/roles/bento-gke/templates/managedcert.yml.j2 @@ -0,0 +1,7 @@ +apiVersion: networking.gke.io/v1beta1 +kind: ManagedCertificate +metadata: + name: bento-certificate-{{tier}} +spec: + domains: + - gke-{{tier}}.bento-tools.org \ No newline at end of file diff --git a/ansible/roles/bento-gke/tests/inventory b/ansible/roles/bento-gke/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-gke/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-gke/tests/test.yml b/ansible/roles/bento-gke/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-gke/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-gke/vars/main.yml b/ansible/roles/bento-gke/vars/main.yml new file mode 100644 index 000000000..788daf16c --- /dev/null +++ b/ansible/roles/bento-gke/vars/main.yml @@ -0,0 +1,13 @@ +--- +# vars file for cicd +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','TAG') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: 
us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_collector_url: "{{ lookup('aws_ssm', 'sumo_collector_url', region='us-east-1' ) }}" +app_name: bento-{{platform}}-{{tier}} +#hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/bento-icdc/.travis.yml b/ansible/roles/bento-icdc/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-icdc/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-icdc/README.md b/ansible/roles/bento-icdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-icdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) 
should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/bento-icdc/defaults/main.yml b/ansible/roles/bento-icdc/defaults/main.yml new file mode 100644 index 000000000..d3746df26 --- /dev/null +++ b/ansible/roles/bento-icdc/defaults/main.yml @@ -0,0 +1,12 @@ +--- +# defaults file for cicd +platform: aws +release: fail +project: icdc +# jq: "{{project}}.{{tier}}" +# alb_dns_name: +# icdc: +# dev: internal-dev-a-appli-caninedata-8uhlkjyn-1830173970.us-east-1.elb.amazonaws.com +# qa: internal-qa-ca-appli-6p2vxovvw4sd-1727805720.us-east-1.elb.amazonaws.com + + diff --git a/ansible/roles/bento-icdc/files/inject.template.js b/ansible/roles/bento-icdc/files/inject.template.js new file mode 100644 index 000000000..da2b7d199 --- /dev/null +++ b/ansible/roles/bento-icdc/files/inject.template.js @@ -0,0 +1,14 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: 
'${REACT_APP_GA_TRACKING_ID}', + REACT_APP_FILE_SERVICE_API: '${REACT_APP_FILE_SERVICE_API}', + REACT_APP_DATA_MODEL: '${REACT_APP_DATA_MODEL}', + REACT_APP_DATA_MODEL_PROPS: '${REACT_APP_DATA_MODEL_PROPS}', +}; diff --git a/ansible/roles/bento-icdc/files/nginx-entrypoint.sh b/ansible/roles/bento-icdc/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/bento-icdc/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/bento-icdc/handlers/main.yml b/ansible/roles/bento-icdc/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-icdc/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-icdc/meta/main.yml b/ansible/roles/bento-icdc/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-icdc/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/bento-icdc/tasks/build.yml b/ansible/roles/bento-icdc/tasks/build.yml new file mode 100644 index 000000000..59f52cce4 --- /dev/null +++ b/ansible/roles/bento-icdc/tasks/build.yml @@ -0,0 +1,273 @@ +--- +- name: create docker build directory + file: + path: "{{workspace}}/build" + state : directory + +- name: set sandbox environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_ICDC_SANDBOX_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "sandbox" + +- name: set dev environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_ICDC_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "dev" + +- name: set qa environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_ICDC_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_ICDC_STAGE_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "stage" + +- name: set prod environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_ICDC_PROD_IP') }}" + 
bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +################################################################################ + + + +################################################################################## + +- name: get backend commit ID + shell: git rev-parse HEAD + args: + chdir: "{{workspace}}" + register: backend_tag + +- name: get frontend commit ID + shell: git rev-parse HEAD + args: + chdir: "{{workspace}}/icdc-frontend" + register: frontend_tag + +- name: echo backend tag + debug: + msg: "{{ backend_tag.stdout_lines }}" + +- name: echo frontend tag + debug: + msg: "{{ frontend_tag.stdout_lines }}" + +################################################################################## + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{workspace}}/src/main/resources/application.properties.j2" + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: create graphql directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/graphql" + +- name: create yaml directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/yaml" + +- name: copy schema from frontend to resources + template: + src: "{{workspace}}/icdc-frontend/graphql/{{ schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ schema_file }}" + +- name: verify redis schema file exists + stat: + path: "{{workspace}}/icdc-frontend/graphql/{{ redis_schema_file }}" + register: redis_schema + +- name: verify redis init queries file exists + stat: + path: "{{workspace}}/icdc-frontend/yaml/{{ redis_init_queries_file }}" + register: redis_queries + +- name: verify test queries file exists + stat: + path: "{{workspace}}/icdc-frontend/yaml/{{ test_queries_file }}" + register: test_queries + +- name: copy 
redis schema from frontend to resources + template: + src: "{{workspace}}/icdc-frontend/graphql/{{ redis_schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ redis_schema_file }}" + when: redis_schema.stat.exists + +- name: copy redis init queries from frontend to resources + template: + src: "{{workspace}}/icdc-frontend/yaml/{{ redis_init_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ redis_init_queries_file }}" + when: redis_queries.stat.exists + +- name: copy test queries from frontend to resources + template: + src: "{{workspace}}/icdc-frontend/yaml/{{ test_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ test_queries_file }}" + when: test_queries.stat.exists + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + + + +- name: log into ncidockerhub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + registry: https://ncidockerhub.nci.nih.gov + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + + +- name: build cbiitssrepo/icdc-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/backend-icdc-dockerfile" + pull: yes + nocache: yes + name: ncidockerhub.nci.nih.gov/icdc/icdc-backend + + tag: "{{backend_version}}" + push: yes + force_source: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: 
"{{docker_host}}" + +- name: Add tag latest to cbiitssrepo/icdc-backend image + docker_image: + name: "ncidockerhub.nci.nih.gov/icdc/icdc-backend:{{backend_version}}" + repository: ncidockerhub.nci.nih.gov/icdc/icdc-backend:latest + force_tag: yes + push: yes + source: local + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + +- name: remove .env + file: + state: absent + path: "{{workspace}}/icdc-frontend/.env" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/icdc-nginx.conf' + dest: '{{workspace}}/build/nginx.conf' + +#- name: copy nginx conf +# copy: +# remote_src: yes +# src: '{{workspace}}/icdc-devops/docker/dockerfiles/icdc-nginx.conf' +# dest: '{{workspace}}/icdc-frontend/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/build/nginx-entrypoint.sh" + mode: 0755 + +#- name: copy entrypoint.sh to workspace +# copy: +# src: "nginx-entrypoint.sh" +# dest: "{{workspace}}/icdc-frontend/nginx-entrypoint.sh" +# mode: 0755 + +- name: run npm install in {{workspace}}/icdc-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/icdc-frontend" + loop: + - npm install npm@7.7.6 -g + - npm install + - npm run build + +- name: move dist from frontend to temp build + copy: + src: "{{ container_build_path }}/dist" + dest: "{{workspace}}/build" + remote_src: yes + directory_mode: yes + +- name: copy env to dist + copy: + src: inject.template.js + dest: "{{workspace}}/build/dist/inject.template.js" + mode: 0755 + +#- name: copy env to dist +# copy: +# # remote_src: yes +# src: inject.template.js +# dest: "{{workspace}}/icdc-frontend/dist/inject.template.js" +# mode: 0755 + +- name: build cbiitssrepo/icdc-frontend image + docker_image: + build: + 
path: "{{workspace}}/build" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-icdc-dockerfile" + pull: yes + nocache: yes + name: ncidockerhub.nci.nih.gov/icdc/icdc-frontend + + tag: "{{frontend_version}}" + push: yes + force_source: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + +- name: Add tag latest to cbiitssrepo/icdc-frontend image + docker_image: + name: "ncidockerhub.nci.nih.gov/icdc/icdc-frontend:{{frontend_version}}" + repository: ncidockerhub.nci.nih.gov/icdc/icdc-frontend:latest + force_tag: yes + push: yes + source: local + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + diff --git a/ansible/roles/bento-icdc/tasks/deploy.yml b/ansible/roles/bento-icdc/tasks/deploy.yml new file mode 100644 index 000000000..d2d094eaf --- /dev/null +++ b/ansible/roles/bento-icdc/tasks/deploy.yml @@ -0,0 +1,79 @@ +--- +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + registry: https://ncidockerhub.nci.nih.gov + +- name: verify host + file: + path: /tmp/myworkspace + state: directory + + +################################################################### + +# - name: remove frontend container +# docker_container: +# name: frontend +# state: absent + +# - name: remove backend container +# docker_container: +# name: backend +# state: absent + +- name: clean up + shell: docker system prune -a -f + +- name: ensure log and docker directory exists + file: + path: "{{item}}" + state: directory + loop: + - /local/content/docker + - 
/local/content/k9dc/file-downloader + - /local/content/k9dc/nginx + +- name: update services and compose files + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + loop: + - {src: 'app.yml.j2',dest: '/local/content/docker/app.yml'} + - {src: 'app.service.j2',dest: '/etc/systemd/system/app.service'} + - {src: 'app.timer.j2',dest: '/etc/systemd/system/app.timer'} + +- name: start frontend and backend containers + docker_compose: + project_src: /local/content/docker + files: app.yml + state: present + +- name: reload systemd + systemd: + daemon_reload: yes + +- name: enable log-agents and app.timer services + service: + name: app.timer + enabled: yes + +- name: ensure app.service is disabled + service: + name: app.service + enabled: no + + +- name: "wait for {{ frontend_url }} to become available" + uri: + url: "{{ frontend_url }}" + follow_redirects: none + method: GET + register: _result + until: ('status' in _result) and (_result.status == 200) + retries: 100 + delay: 10 + + + diff --git a/ansible/roles/bento-icdc/tasks/fail-build.yml b/ansible/roles/bento-icdc/tasks/fail-build.yml new file mode 100644 index 000000000..17b3b25b7 --- /dev/null +++ b/ansible/roles/bento-icdc/tasks/fail-build.yml @@ -0,0 +1,51 @@ +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + registry: https://ncidockerhub.nci.nih.gov + +# - name: remove old deployment +# docker_compose: +# project_src: /local/content/docker +# files: log-agents.yml +# state: absent + +# - name: remove old deployment +# docker_compose: +# project_src: /local/content/docker +# files: app.yml +# state: absent + +- name: check if prev_app.yml exist + stat: + path: "/local/content/docker/prev_app.yml" + register: prev_app + + +- name: rename prev_app.yml app.yml file + copy: + src: "/local/content/docker/prev_app.yml" + dest: "/local/content/docker/app.yml" + remote_src: yes + when: prev_app.stat.exists or release == "fail" + +- name: start frontend 
and backend containers + docker_compose: + project_src: /local/content/docker + files: app.yml + state: present + +- name: Pause to allow updates to process + pause: + seconds: 15 + +- name: "wait for {{ frontend_url }} to become available" + uri: + url: "{{ frontend_url }}" + follow_redirects: none + method: GET + register: _result + until: ('status' in _result) and (_result.status == 200) + retries: 100 + delay: 10 + diff --git a/ansible/roles/bento-icdc/tasks/main.yml b/ansible/roles/bento-icdc/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-icdc/tasks/pass-build.yml b/ansible/roles/bento-icdc/tasks/pass-build.yml new file mode 100644 index 000000000..6ab460219 --- /dev/null +++ b/ansible/roles/bento-icdc/tasks/pass-build.yml @@ -0,0 +1,18 @@ + +- name: ensure /local/content/docker directory exist + file: + path: /local/content/docker + state: directory + +- name: check if app.yml exist + stat: + path: "/local/content/docker/app.yml" + register: prev_app + + +- name: save previous app.yml file + copy: + src: "/local/content/docker/app.yml" + dest: "/local/content/docker/prev_app.yml" + remote_src: yes + when: not prev_app.stat.exists or release == "pass" diff --git a/ansible/roles/bento-icdc/tasks/stop_site.yml b/ansible/roles/bento-icdc/tasks/stop_site.yml new file mode 100644 index 000000000..068d32ca1 --- /dev/null +++ b/ansible/roles/bento-icdc/tasks/stop_site.yml @@ -0,0 +1,41 @@ +--- + +- name: verify host + file: + path: /tmp/myworkspace + state: directory + +- name: clean up + shell: docker system prune -a -f + +- name: ensure /local/content/docker exists + file: + path: /local/content/docker + state: directory + + +##### UPDATED TO REMOVE ALL CONTAINERS ##### +#- name: start log agents +- name: stop log agents + docker_compose: + project_src: /local/content/docker + files: log-agents.yml + #state: present + state: absent + + +#- name: start frontend and backend containers +- name: stop frontend and backend 
containers + docker_compose: + project_src: /local/content/docker + files: app.yml + #state: present + state: absent + +- name: get docker output + shell: docker ps -a + register: docker_out + +- name: echo docker output + debug: + msg: "{{ docker_out.stdout_lines }}" \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/app.service.j2 b/ansible/roles/bento-icdc/templates/app.service.j2 new file mode 100644 index 000000000..8860dae1d --- /dev/null +++ b/ansible/roles/bento-icdc/templates/app.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=Docker Compose Application Service +Requires=docker.service +After=docker.service + +[Service] +Type=oneshot +RemainAfterExit=yes +WorkingDirectory=/local/content/docker + +#User=tomcat +#Group=tomcat + +ExecStart=/usr/bin/docker-compose -f app.yml up -d +ExecStop=/usr/bin/docker-compose -f app.yml down +#TimeoutStartSec=60 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/app.timer.j2 b/ansible/roles/bento-icdc/templates/app.timer.j2 new file mode 100644 index 000000000..60d0752c6 --- /dev/null +++ b/ansible/roles/bento-icdc/templates/app.timer.j2 @@ -0,0 +1,8 @@ +[Unit] +Description=Run foo weekly and on boot + +[Timer] +OnStartupSec=120 + +[Install] +WantedBy=timers.target \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/app.yml.j2 b/ansible/roles/bento-icdc/templates/app.yml.j2 new file mode 100644 index 000000000..2576a7858 --- /dev/null +++ b/ansible/roles/bento-icdc/templates/app.yml.j2 @@ -0,0 +1,81 @@ +version: '3.4' +services: + +################################################ +# backend container +################################################ + bento-backend: + container_name: backend + image: ncidockerhub.nci.nih.gov/icdc/icdc-backend:{{backend_version}} + environment: + NEO4J_URL: bolt://{{ neo4j_ip }}:7687 + NEO4J_USER: "{{ neo4j_user }}" + NEO4J_PASSWORD: "{{ neo4j_password }}" + 
NEO4J_GRAPHQL_ENDPOINT: http://{{ neo4j_ip }}:7474/graphql/ + NEO4J_AUTHORIZATION: "{{ neo4j_bearer }}" + BENTO_API_VERSION: "{{backend_version}}" + REDIS_ENABLE: "{{ enable_redis }}" + REDIS_USE_CLUSTER: "{{ use_cluster }}" + REDIS_HOST: "{{ redis_host[tier] }}" + REDIS_PORT: "{{ redis_port }}" + REDIS_FILTER_ENABLE: "{{ enable_redis_filter }}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-backend" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_LOG_FILE_NAME: "STDOUT" + NEW_RELIC_LABELS: "Project:{{ project }};Environment:{{ tier }}" + JAVA_OPTS: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + entrypoint: ["/bin/ash", "-c", 'if [ ! -f /usr/local/tomcat/newrelic/newrelic.jar ]; then wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip; fi && bin/catalina.sh run'] + volumes: + - /local/content/k9dc/logs:/usr/local/tomcat/logs + ports: + - "8080:8080" + restart: always + +########################################################## +# frontend container +########################################################## + + bento-frontend: + container_name: frontend + image: ncidockerhub.nci.nih.gov/icdc/icdc-frontend:{{frontend_version}} + environment: + REACT_APP_BACKEND_GETUSERINFO_API: "{{ backend_user_info }}" + REACT_APP_LOGIN_URL: "{{ backend_fence_login }}" + REACT_APP_USER_LOGOUT_URL: "{{ backend_fence_logout }}" + REACT_APP_BACKEND_API: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/v1/graphql/{% else %}https://caninecommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %}" + REACT_APP_ABOUT_CONTENT_URL: "{{ backend_content_url }}" + REACT_APP_BE_VERSION: "{{ bento_api_version }}" + REACT_APP_FE_VERSION: "{{ backend_frontend_version }}" + REACT_APP_GA_TRACKING_ID: "{{ backend_google_analytics_id }}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + 
NEW_RELIC_APP_NAME: "{{ app_name }}-frontend" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_NO_CONFIG_FILE: "true" + REACT_APP_DATA_MODEL: "{% if tier == 'prod' %}https://raw.githubusercontent.com/CBIIT/icdc-model-tool/{{data_model_branch | default('master')}}/model-desc/icdc-model.yml{% else %}https://raw.githubusercontent.com/CBIIT/icdc-model-tool/{{data_model_branch | default('develop')}}/model-desc/icdc-model.yml{% endif %}" + REACT_APP_DATA_MODEL_PROPS: "{% if tier == 'prod' %}https://raw.githubusercontent.com/CBIIT/icdc-model-tool/{{data_model_branch | default('master')}}/model-desc/icdc-model-props.yml{% else %}https://raw.githubusercontent.com/CBIIT/icdc-model-tool/{{data_model_branch | default('develop')}}/model-desc/icdc-model-props.yml{% endif %}" + NEW_RELIC_LABELS: "Project:{{ project }};Environment:{{ tier }}" + REACT_APP_FILE_SERVICE_API: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/api/files/{% else %}https://caninecommons-{{ tier }}.cancer.gov/api/files/{% endif %}" + volumes: + - "/local/content/k9dc/nginx:/var/log/nginx" + ports: + - "80:80" + restart: always + #links: + # - "data-dictionary:dictionary" + + #data-dictionary: + # container_name: data-dictionary + # image: ncidockerhub.nci.nih.gov/icdc/data-dictionary:changedBaseRoute-28 + # environment: + # REACT_APP_MODEL_URL: "{{react_app_model_url}}" + # REACT_APP_MODEL_PROPS_URL: "{{react_app_model_props_url}}" + # volumes: + # - "/local/content/k9dc/data-dictionary:/var/log/nginx" + # ports: + # - "81:81" + # restart: always + + diff --git a/ansible/roles/bento-icdc/templates/backend.env.j2 b/ansible/roles/bento-icdc/templates/backend.env.j2 new file mode 100644 index 000000000..8b6e84bf4 --- /dev/null +++ b/ansible/roles/bento-icdc/templates/backend.env.j2 @@ -0,0 +1,6 @@ +NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} +NEW_RELIC_APP_NAME={{ app_name }}-backend-{{ inventory_hostname }} 
+NEW_RELIC_DISTRIBUTED_TRACING_ENABLED=true +NEW_RELIC_HOST=gov-collector.newrelic.com +NEW_RELIC_LOG_FILE_NAME=STDOUT +JAVA_OPTS="-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/docker-compose.yml.j2 b/ansible/roles/bento-icdc/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..f76d1550b --- /dev/null +++ b/ansible/roles/bento-icdc/templates/docker-compose.yml.j2 @@ -0,0 +1,120 @@ + +version: '3.4' +services: + +################################################################# +# syslog container +################################################################ + + sumologic-syslog: + container_name: sumologic-syslog + image: sumologic/collector:latest-syslog + environment: + SUMO_COLLECTOR_NAME: "{{ app_name }}-syslog-{{ inventory_hostname }}" + SUMO_ACCESS_ID: "{{ sumo_access_id }}" + SUMO_ACCESS_KEY: "{{ sumo_access_key }}" + SUMO_COLLECTOR_NAME_PREFIX: "" + SUMO_CLOBBER: "true" + restart: always + ports: + - "514:514" + +###################################################################### +# sumologic container +###################################################################### + + sumologic-docker: + container_name: sumologic-docker + image: sumologic/collector:latest + environment: + SUMO_COLLECTOR_NAME: "{{ app_name }}-docker-{{ inventory_hostname }}" + SUMO_ACCESS_ID: "{{ sumo_access_id }}" + SUMO_ACCESS_KEY: "{{ sumo_access_key }}" + SUMO_COLLECTOR_NAME_PREFIX: "" + SUMO_CLOBBER: "true" + restart: always + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" +######################################################################## +# new relic container +######################################################################### + + newrelic-docker: + container_name: newrelic-docker + image: newrelic/infrastructure:latest + environment: + NRIA_LICENSE_KEY: "{{ newrelic_license_key }}" + NRIA_DISPLAY_NAME: "{{ app_name }}-docker-{{ inventory_hostname 
}}" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + restart: always + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + - "/:/host" + +################################################ +# backend container +################################################ + bento-backend: + container_name: backend + image: ncidockerhub.nci.nih.gov/icdc/icdc-backend:{{build_number}} + environment: + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-backend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_LOG_FILE_NAME: "STDOUT" + JAVA_OPTS: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + + command: sh -c "dockerize -wait tcp://sumologic-syslog:514 -timeout 300s -wait-retry-interval 30s /bin/catalina.sh run" + ports: + - "8080:8080" + depends_on: + - sumologic-syslog + logging: + driver: syslog + options: + syslog-format: "rfc5424micro" + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-backend" + restart: always + +########################################################## +# frontend container +########################################################## + + bento-frontend: + container_name: frontend + image: ncidockerhub.nci.nih.gov/icdc/icdc-frontend:{{ build_number }} + environment: + REACT_APP_BACKEND_GETUSERINFO_API: "{{ backend_user_info }}" + REACT_APP_LOGIN_URL: "{{ backend_fence_login }}" + REACT_APP_USER_LOGOUT_URL: "{{ backend_fence_logout }}" + REACT_APP_BACKEND_API: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/v1/graphql/{% else %}https://caninecommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %}" + REACT_APP_ABOUT_CONTENT_URL: "{{ backend_content_url }}" + REACT_APP_BE_VERSION: "{{ bento_api_version }}" + REACT_APP_FE_VERSION: "{{ backend_frontend_version }}" + REACT_APP_GA_TRACKING_ID: "{{ backend_google_analytics_id }}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ 
app_name }}-frontend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_NO_CONFIG_FILE: "true" + command: sh -c "dockerize -wait tcp://sumologic-syslog:514 -timeout 300s -wait-retry-interval 30s /nginx-entrypoint.sh" + logging: + driver: syslog + options: + syslog-format: "rfc5424micro" + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-frontend" + restart: always + ports: + - "80:80" + depends_on: + - sumologic-syslog + + + + + + + diff --git a/ansible/roles/bento-icdc/templates/env.j2 b/ansible/roles/bento-icdc/templates/env.j2 new file mode 100644 index 000000000..e9655e511 --- /dev/null +++ b/ansible/roles/bento-icdc/templates/env.j2 @@ -0,0 +1,22 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_GETUSERINFO_API=https://caninecommons.cancer.gov/fence/login/ +REACT_APP_LOGIN_URL=https://nci-crdc-staging.datacommons.io/user/oauth2/authorize?client_id={{fence_id}}&response_type=code&redirect_uri=https%3A%2F%2Fcaninecommons.cancer.gov%2F&scope=openid%20user +REACT_APP_USER_LOGOUT_URL=https://caninecommons.cancer.gov/fence/logout +REACT_APP_BACKEND_API=https://caninecommons.cancer.gov/v1/graphql/ +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/icdc-codebase/master/src/main/frontend/src/content/{{tier}}/aboutPagesContent.yaml +REACT_APP_BE_VERSION={{backend_version}} +REACT_APP_FE_VERSION={{frontend_version}} +REACT_APP_GA_TRACKING_ID=UA-154442677-1 + +{% else %} +REACT_APP_BACKEND_GETUSERINFO_API=https://caninecommons-{{tier}}.cancer.gov/fence/login/ +REACT_APP_LOGIN_URL=https://nci-crdc-staging.datacommons.io/user/oauth2/authorize?client_id={{fence_id}}&response_type=code&redirect_uri=https%3A%2F%2Fcaninecommons-{{tier}}.cancer.gov%2F&scope=openid%20user +REACT_APP_USER_LOGOUT_URL=https://caninecommons-{{tier}}.cancer.gov/fence/logout +REACT_APP_BACKEND_API=https://caninecommons-{{tier}}.cancer.gov/v1/graphql/ 
+REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/icdc-codebase/master/src/main/frontend/src/content/{{tier}}/aboutPagesContent.yaml +REACT_APP_BE_VERSION={{backend_version}} +REACT_APP_FE_VERSION={{frontend_version}} +REACT_APP_GA_TRACKING_ID=UA-154442677-1 +{% endif %} + + diff --git a/ansible/roles/bento-icdc/templates/frontend.env.j2 b/ansible/roles/bento-icdc/templates/frontend.env.j2 new file mode 100644 index 000000000..21228cd94 --- /dev/null +++ b/ansible/roles/bento-icdc/templates/frontend.env.j2 @@ -0,0 +1,13 @@ +REACT_APP_BACKEND_GETUSERINFO_API={{ backend_user_info }} +REACT_APP_LOGIN_URL={{ backend_fence_login }} +REACT_APP_USER_LOGOUT_URL={{ backend_fence_logout }} +REACT_APP_BACKEND_API={% if tier == 'prod' %}https://caninecommons.cancer.gov/v1/graphql/{% else %}https://caninecommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %} +REACT_APP_ABOUT_CONTENT_URL={{ backend_content_url }} +REACT_APP_BE_VERSION={{ bento_api_version }} +REACT_APP_FE_VERSION={{ backend_frontend_version }} +REACT_APP_GA_TRACKING_ID={{ backend_google_analytics_id }} +NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} +NEW_RELIC_APP_NAME={{ app_name }}-frontend-{{ inventory_hostname }} +NEW_RELIC_DISTRIBUTED_TRACING_ENABLED=true +NEW_RELIC_HOST=gov-collector.newrelic.com +NEW_RELIC_NO_CONFIG_FILE=true \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/log-agents.service.j2 b/ansible/roles/bento-icdc/templates/log-agents.service.j2 new file mode 100644 index 000000000..7b47b8a4e --- /dev/null +++ b/ansible/roles/bento-icdc/templates/log-agents.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=Docker Compose Application Service +Requires=docker.service +After=docker.service + +[Service] +Type=oneshot +RemainAfterExit=yes +WorkingDirectory=/local/content/docker + +#User=tomcat +#Group=tomcat + +ExecStart=/usr/bin/docker-compose -f log-agents.yml up -d +ExecStop=/usr/bin/docker-compose -f log-agents.yml down +#TimeoutStartSec=60 + +[Install] 
+WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/log-agents.yml.j2 b/ansible/roles/bento-icdc/templates/log-agents.yml.j2 new file mode 100644 index 000000000..374237bb7 --- /dev/null +++ b/ansible/roles/bento-icdc/templates/log-agents.yml.j2 @@ -0,0 +1,56 @@ + +version: '3.4' +services: + +################################################################# +# syslog container +################################################################ + + sumologic-syslog: + container_name: sumologic-syslog + image: sumologic/collector:latest-syslog + environment: + SUMO_COLLECTOR_NAME: "{{ app_name }}-syslog-{{ inventory_hostname }}" + SUMO_ACCESS_ID: "{{ sumo_access_id }}" + SUMO_ACCESS_KEY: "{{ sumo_access_key }}" + SUMO_COLLECTOR_NAME_PREFIX: "" + SUMO_CLOBBER: "true" + restart: always + ports: + - "514:514" + +###################################################################### +# sumologic container +###################################################################### + + sumologic-docker: + container_name: sumologic-docker + image: sumologic/collector:latest + environment: + SUMO_COLLECTOR_NAME: "{{ app_name }}-docker-{{ inventory_hostname }}" + SUMO_ACCESS_ID: "{{ sumo_access_id }}" + SUMO_ACCESS_KEY: "{{ sumo_access_key }}" + SUMO_COLLECTOR_NAME_PREFIX: "" + SUMO_CLOBBER: "true" + restart: always + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" +######################################################################## +# new relic container +######################################################################### + + newrelic-docker: + container_name: newrelic-docker + image: newrelic/infrastructure:latest + environment: + NRIA_LICENSE_KEY: "{{ newrelic_license_key }}" + NRIA_DISPLAY_NAME: "{{ app_name }}-docker-{{ inventory_hostname }}" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + restart: always + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + - "/:/host" + +networks: + agents: + 
driver: bridge \ No newline at end of file diff --git a/ansible/roles/bento-icdc/templates/new-app.yml b/ansible/roles/bento-icdc/templates/new-app.yml new file mode 100644 index 000000000..d07f6d18c --- /dev/null +++ b/ansible/roles/bento-icdc/templates/new-app.yml @@ -0,0 +1,59 @@ + +version: '3.4' +services: + +################################################ +# backend container +################################################ + bento-backend: + container_name: backend + image: ncidockerhub.nci.nih.gov/icdc/icdc-backend:{{build_number}} + environment: + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-backend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_LOG_FILE_NAME: "STDOUT" + JAVA_OPTS: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + volumes: + - /local/content/k9dc/logs:/usr/local/tomcat/logs + ports: + - "8080:8080" + #logging: + # driver: syslog + # options: + # syslog-format: "rfc5424micro" + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-backend" + restart: always + +########################################################## +# frontend container +########################################################## + + bento-frontend: + container_name: frontend + image: ncidockerhub.nci.nih.gov/icdc/icdc-frontend:{{ build_number }} + environment: + REACT_APP_BACKEND_GETUSERINFO_API: "{{ backend_user_info }}" + REACT_APP_LOGIN_URL: "{{ backend_fence_login }}" + REACT_APP_USER_LOGOUT_URL: "{{ backend_fence_logout }}" + REACT_APP_BACKEND_API: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/v1/graphql/{% else %}https://caninecommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %}" + REACT_APP_ABOUT_CONTENT_URL: "{{ backend_content_url }}" + REACT_APP_BE_VERSION: "{{ bento_api_version }}" + REACT_APP_FE_VERSION: "{{ backend_frontend_version }}" + REACT_APP_GA_TRACKING_ID: "{{ backend_google_analytics_id 
}}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-frontend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_NO_CONFIG_FILE: "true" + ports: + - "80:80" + #logging: + # driver: syslog + # options: + # syslog-format: "rfc5424micro" + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-frontend" + restart: always diff --git a/ansible/roles/bento-icdc/templates/new-appy.yml b/ansible/roles/bento-icdc/templates/new-appy.yml new file mode 100644 index 000000000..9d769e06a --- /dev/null +++ b/ansible/roles/bento-icdc/templates/new-appy.yml @@ -0,0 +1,65 @@ + +version: '3.4' +services: + +################################################ +# backend container +################################################ + bento-backend: + container_name: backend + image: ncidockerhub.nci.nih.gov/icdc/icdc-backend:{{backend_version}} + environment: + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-backend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_LOG_FILE_NAME: "STDOUT" + JAVA_OPTS: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + ports: + - "8080:8080" + logging: + driver: syslog + options: + syslog-format: "rfc5424micro" + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-backend" + restart: always + +########################################################## +# frontend container +########################################################## + + bento-frontend: + container_name: frontend + image: ncidockerhub.nci.nih.gov/icdc/icdc-frontend:{{frontend_version}} + environment: + REACT_APP_BACKEND_GETUSERINFO_API: "{{ backend_user_info }}" + REACT_APP_LOGIN_URL: "{{ backend_fence_login }}" + REACT_APP_USER_LOGOUT_URL: "{{ backend_fence_logout }}" + REACT_APP_BACKEND_API: "{% if tier == 
'prod' %}https://caninecommons.cancer.gov/v1/graphql/{% else %}https://caninecommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %}" + REACT_APP_ABOUT_CONTENT_URL: "{{ backend_content_url }}" + REACT_APP_BE_VERSION: "{{ bento_api_version }}" + REACT_APP_FE_VERSION: "{{ backend_frontend_version }}" + REACT_APP_GA_TRACKING_ID: "{{ backend_google_analytics_id }}" + NEW_RELIC_LICENSE_KEY: "{{ newrelic_license_key }}" + NEW_RELIC_APP_NAME: "{{ app_name }}-frontend-{{ inventory_hostname }}" + NEW_RELIC_DISTRIBUTED_TRACING_ENABLED: "true" + NEW_RELIC_HOST: "gov-collector.newrelic.com" + NEW_RELIC_NO_CONFIG_FILE: "true" + logging: + driver: syslog + options: + syslog-format: "rfc5424micro" + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-frontend" + restart: always + ports: + - "80:80" + + + + + + + + diff --git a/ansible/roles/bento-icdc/tests/inventory b/ansible/roles/bento-icdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-icdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento-icdc/tests/test.yml b/ansible/roles/bento-icdc/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-icdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-icdc/vars/main.yml b/ansible/roles/bento-icdc/vars/main.yml new file mode 100644 index 000000000..2a57d658b --- /dev/null +++ b/ansible/roles/bento-icdc/vars/main.yml @@ -0,0 +1,56 @@ +--- +# vars file for cicd +env_file_home: /local/content/docker +# redis config +enable_redis: 'true' +#enable_redis_filter: "{{ lookup('env','ENABLE_REDIS_FILTER') }}" +use_cluster: 'false' +redis_host: + dev: "10.208.2.75" + qa: "10.208.10.169" + stage: "10.208.18.154" + prod: "10.208.26.156" + demo: "10.208.6.182" +redis_password: "" +redis_port: 6379 +home: /local/content/docker +tier: "{{ 
lookup('env','TIER') }}" +prod: prod +platform: "cloudone" +backend_git_tag: "{{ lookup('env','BACKEND_GIT_TAG') }}" +frontend_git_tag: "{{ lookup('env','FRONTEND_GIT_TAG') }}" +frontend_version: "{{ lookup('env','FE_VERSION') }}" +docker_host: "{{ lookup('env','DOCKER_HOST') }}" +tls_hostname: "{{ lookup('env','TLS_HOSTNAME') }}" +fence_id: "{{ lookup('env','FENCE_ID') }}" +backend_version: "{{ lookup('env','BE_VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" + +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +neo4j_bearer: "{{ lookup('env','BEARER') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" +########data-dictionary######## +react_app_model_url: https://raw.githubusercontent.com/CBIIT/icdc-model-tool/master/model-desc/icdc-model.yml +react_app_model_props_url: https://raw.githubusercontent.com/CBIIT/icdc-model-tool/master/model-desc/icdc-model-props.yml +####################### +region: us-east-1 +newrelic_license_key: "{{ lookup('env','NEWRELIC_LIC_KEY')}}" +sumo_access_id: "{{ lookup('env','SUMO_ACCESS_ID')}}" +sumo_access_key: "{{ lookup('env','SUMO_ACCESS_KEY')}}" +# syslog_host: "{{ lookup('env','SYSLOG_HOST')}}" +syslog_host: "{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}" +app_name: icdc-{{ platform }}-{{ tier }} +schema_file: icdc.graphql +backend_user_info: https://caninecommons.cancer.gov/fence/login/ +backend_fence_login: https://nci-crdc-staging.datacommons.io/user/oauth2/authorize?client_id={{ fence_id }}&response_type=code&redirect_uri=https%3A%2F%2Fcaninecommons.cancer.gov%2F&scope=openid%20user +backend_fence_logout: https://caninecommons.cancer.gov/fence/logout +backend_content_url: "{% if tier == prod 
%}https://raw.githubusercontent.com/CBIIT/bento-icdc-frontend/master/src/content/prod/aboutPagesContent.yaml{% else %}https://raw.githubusercontent.com/CBIIT/bento-icdc-frontend/master/src/content/pre-prod/aboutPagesContent.yaml{% endif %}" +bento_api_version: "{{ backend_version }}" +backend_frontend_version: "{{frontend_version }}" +backend_google_analytics_id: UA-154442677-1 +dev_alb_dns_name: internal-dev-a-appli-caninedata-8uhlkjyn-1830173970.us-east-1.elb.amazonaws.com +frontend_url: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/{% else %}https://caninecommons-{{ tier }}.cancer.gov/{% endif %}" diff --git a/ansible/roles/bento-ins/.travis.yml b/ansible/roles/bento-ins/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento-ins/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento-ins/README.md b/ansible/roles/bento-ins/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento-ins/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento-ins/defaults/main.yml b/ansible/roles/bento-ins/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/bento-ins/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/bento-ins/files/inject.template.js b/ansible/roles/bento-ins/files/inject.template.js new file mode 100644 index 000000000..9c7915530 --- /dev/null +++ b/ansible/roles/bento-ins/files/inject.template.js @@ -0,0 +1,11 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', +}; diff --git a/ansible/roles/bento-ins/files/nginx-entrypoint.sh b/ansible/roles/bento-ins/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/bento-ins/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/bento-ins/handlers/main.yml b/ansible/roles/bento-ins/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento-ins/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento-ins/meta/main.yml b/ansible/roles/bento-ins/meta/main.yml new file mode 100644 index 
000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento-ins/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/bento-ins/tasks/build.yml b/ansible/roles/bento-ins/tasks/build.yml new file mode 100644 index 000000000..d6240fc66 --- /dev/null +++ b/ansible/roles/bento-ins/tasks/build.yml @@ -0,0 +1,180 @@ +--- +- name: set dev environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "dev" + +- name: set qa environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_PERF_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "perf" + +- name: set prod environment facts + set_fact: + NEO4J_IN_ip: "{{ lookup('env','NEO4J_IN_PROD_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{workspace}}/src/main/resources/application.properties.j2" + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: create graphql directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/graphql" + +- name: create yaml directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/yaml" + +- name: copy schema from frontend to resources + template: + src: "{{workspace}}/bento-frontend/graphql/{{ schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ schema_file }}" + +- name: verify redis schema file exists + stat: + path: "{{workspace}}/bento-frontend/graphql/{{ redis_schema_file }}" + register: redis_schema + +- name: verify redis init queries file exists + stat: + path: "{{workspace}}/bento-frontend/yaml/{{ 
redis_init_queries_file }}" + register: redis_queries + +- name: verify test queries file exists + stat: + path: "{{workspace}}/bento-frontend/yaml/{{ test_queries_file }}" + register: test_queries + +- name: copy redis schema from frontend to resources + template: + src: "{{workspace}}/bento-frontend/graphql/{{ redis_schema_file }}" + dest: "{{workspace}}/src/main/resources/graphql/{{ redis_schema_file }}" + when: redis_schema.stat.exists + +- name: copy redis init queries from frontend to resources + template: + src: "{{workspace}}/bento-frontend/yaml/{{ redis_init_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ redis_init_queries_file }}" + when: redis_queries.stat.exists + +- name: copy test queries from frontend to resources + template: + src: "{{workspace}}/bento-frontend/yaml/{{ test_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ test_queries_file }}" + when: test_queries.stat.exists + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/{{stack_name}}-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{stack_name}}-backend + tag: "{{backend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/in-backend image + docker_image: + name: "cbiitssrepo/{{stack_name}}-backend:{{backend_version}}-{{build_number}}" + repository: cbiitssrepo/{{stack_name}}-backend:latest + force_tag: yes + push: yes + source: local +########################################### +- name: remove .env + file: + state: 
absent + path: "{{workspace}}/bento-frontend/.env" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx.conf' + dest: '{{workspace}}/bento-frontend/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/bento-frontend/nginx-entrypoint.sh" + mode: 0755 +# - name: copy environment file to {{workspace}}/bento-frontend +# template: +# src: env.j2 +# dest: "{{workspace}}/bento-frontend/.env" +- name: run npm install in {{workspace}}/bento-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/bento-frontend" + loop: + # - npm install npm@7.7.6 -g + - npm install + - npm run build + +- name: copy env to dist + copy: + # remote_src: yes + src: inject.template.js + dest: "{{workspace}}/bento-frontend/dist/inject.template.js" + mode: 0755 + +- name: build cbiitssrepo/{{stack_name}}-frontend image + docker_image: + build: + path: "{{workspace}}/bento-frontend" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-icdc-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{stack_name}}-frontend + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/in-frontend image + docker_image: + name: "cbiitssrepo/{{stack_name}}-frontend:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/{{stack_name}}-frontend:latest + force_tag: yes + push: yes + source: local + + + + \ No newline at end of file diff --git a/ansible/roles/bento-ins/tasks/deploy.yml b/ansible/roles/bento-ins/tasks/deploy.yml new file mode 100644 index 000000000..1eabb42d0 --- /dev/null +++ b/ansible/roles/bento-ins/tasks/deploy.yml @@ -0,0 +1,396 @@ +--- +############################################################################################################################ + +# Task Definitions + 
+############################################################################################################################ + +- name: create task definition - {{stack_name}}-backend + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/{{stack_name}}-backend:{{backend_version}}" + environment: + - name: NEO4J_URL + value: bolt://{{ neo4j_ip }}:7687 + - name: NEO4J_USER + value: "{{ neo4j_user }}" + - name: NEO4J_PASSWORD + value: "{{ neo4j_password }}" + - name: NEO4J_GRAPHQL_ENDPOINT + value: http://{{ neo4j_ip }}:7474/graphql/ + - name: NEO4J_AUTHORIZATION + value: "{{ neo4j_bearer }}" + - name: BENTO_API_VERSION + value: "{{backend_version}}" + - name: REDIS_ENABLE + value: "{{ enable_redis }}" + - name: REDIS_USE_CLUSTER + value: "{{ use_cluster }}" + - name: REDIS_HOST + value: "{{ redis_host[tier] }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: REDIS_FILTER_ENABLE + value: "{{ enable_redis_filter }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + # - name: NEW_RELIC_APP_NAME + # value: "{{ app_name }}-backend" + # - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + # value: true + # - name: NEW_RELIC_HOST + # value: "gov-collector.newrelic.com" + # - name: NEW_RELIC_LOG_FILE_NAME + # value: "STDOUT" + # - name: JAVA_OPTS + # value: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + # entryPoint: + # - "/bin/ash" + # - "-c" + # - 'wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip && bin/catalina.sh run' + portMappings: + - containerPort: "8080" + hostPort: "8080" + # logConfiguration: + # logDriver: syslog + # options: + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-backend" + # syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-backend" + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: 
task_output + +- name: create task definition - in-frontend + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "cbiitssrepo/{{stack_name}}-frontend:{{frontend_version}}" + environment: + - name: REACT_APP_BACKEND_API + value: "{% if tier == 'prod' %}https://api-{{stack_name}}.bento-tools.org/v1/graphql/{% else %}https://api-{{stack_name}}-{{ tier }}.bento-tools.org/v1/graphql/{% endif %}" + - name: REACT_APP_FILE_SERVICE_API + value: "{% if tier == 'prod' %}https://bento-tools.org/api/files/{% else %}https://{{ tier }}.bento-tools.org/api/files/{% endif %}" + - name: REACT_APP_BE_VERSION + value: "{{backend_version}}" + - name: REACT_APP_FE_VERSION + value: "{{frontend_version}}" + - name: REACT_APP_ABOUT_CONTENT_URL + value: "{{ backend_content_url }}" + # - name: NEW_RELIC_LICENSE_KEY + # value: "{{ newrelic_license_key }}" + # - name: NEW_RELIC_APP_NAME + # value: "{{ app_name }}-frontend" + # - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + # value: true + # - name: NEW_RELIC_HOST + # value: "gov-collector.newrelic.com" + # - name: NEW_RELIC_NO_CONFIG_FILE + # value: true + portMappings: + - containerPort: "80" + hostPort: "80" + # logConfiguration: + # logDriver: syslog + # options: + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-frontend" + # syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-frontend" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +# - name: create task definition - sumo syslog +# ecs_taskdefinition: +# containers: +# - name: sumologic-syslog +# essential: true +# image: "sumologic/collector:latest-syslog" +# environment: +# - name: SUMO_COLLECTOR_NAME +# value: "{{ app_name }}-syslog" +# - name: SUMO_ACCESS_ID +# value: "{{ sumo_access_id }}" +# - name: SUMO_ACCESS_KEY +# value: "{{ sumo_access_key }}" +# - name: SUMO_COLLECTOR_NAME_PREFIX +# value: "" +# - name: SUMO_CLOBBER +# value: "true" +# 
portMappings: +# - containerPort: "514" +# hostPort: "514" +# network_mode: bridge +# family: bento-{{tier}}-sumo_syslog +# state: present +# memory: '512' +# cpu: '128' +# region: "{{region}}" +# register: task_output + +# - name: create task definition - sumo docker +# ecs_taskdefinition: +# containers: +# - name: sumologic-docker +# essential: true +# image: "sumologic/collector:latest" +# environment: +# - name: SUMO_COLLECTOR_NAME +# value: "{{ app_name }}-docker" +# - name: SUMO_ACCESS_ID +# value: "{{ sumo_access_id }}" +# - name: SUMO_ACCESS_KEY +# value: "{{ sumo_access_key }}" +# - name: SUMO_COLLECTOR_NAME_PREFIX +# value: "" +# - name: SUMO_CLOBBER +# value: "true" +# mountPoints: +# - containerPath: /var/run/docker.sock +# sourceVolume: docker-sock +# readOnly: true +# volumes: +# - name: docker-sock +# host: +# sourcePath: /var/run/docker.sock +# network_mode: bridge +# family: bento-{{tier}}-sumo_docker +# state: present +# memory: '512' +# cpu: '128' +# region: "{{region}}" +# register: task_output + +# - name: create task definition - newrelic docker +# ecs_taskdefinition: +# containers: +# - name: newrelic-docker +# essential: true +# image: "newrelic/infrastructure-bundle:latest" +# environment: +# - name: NRIA_LICENSE_KEY +# value: "{{ newrelic_license_key }}" +# - name: NRIA_DISPLAY_NAME +# value: "{{ app_name }}-docker" +# - name: NEW_RELIC_HOST +# value: "gov-collector.newrelic.com" +# - name: STATUS_URL +# value: "{% if tier == 'prod' %}https://bento-tools.org/nginx_status{% else %}https://{{ tier }}.bento-tools.org/nginx_status{% endif %}" +# entryPoint: +# - "/bin/ash" +# - "-c" +# - 'echo -e "integrations:\n - name: nri-nginx\n env:\n REMOTE_MONITORING: true\n METRICS: 1" > /etc/newrelic-infra/integrations.d/nginx-config.yml && /sbin/tini -- /usr/bin/newrelic-infra-service' +# mountPoints: +# - containerPath: /var/run/docker.sock +# sourceVolume: docker-sock +# readOnly: true +# - containerPath: /host +# sourceVolume: docker-host +# 
readOnly: true +# volumes: +# - name: docker-sock +# host: +# sourcePath: /var/run/docker.sock +# - name: docker-host +# host: +# sourcePath: / +# network_mode: bridge +# family: bento-{{tier}}-nr_docker +# state: present +# memory: '128' +# cpu: '128' +# region: "{{region}}" +# register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - bento frontend + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-frontend" + region: "{{region}}" + register: task_frontend + +- name: query task definition - bento backend + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-backend" + region: "{{region}}" + register: task_backend + +# - name: query task definition - sumologic syslog +# ecs_taskdefinition_info: +# task_definition: bento-{{tier}}-sumo_syslog +# region: "{{region}}" +# register: task_sumo_syslog + +# - name: query task definition - sumologic docker +# ecs_taskdefinition_info: +# task_definition: bento-{{tier}}-sumo_docker +# region: "{{region}}" +# register: task_sumo_docker + +# - name: query task definition - newrelic docker +# ecs_taskdefinition_info: +# task_definition: bento-{{tier}}-nr_docker +# region: "{{region}}" +# register: task_nr_docker + +############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-backend" + details: true + region: "{{region}}" + register: service_backend + +- name: query ecs service 
+ ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-frontend" + details: true + region: "{{region}}" + register: service_frontend + +# - name: query sumologic syslog service +# ecs_service_info: +# cluster: bento-{{tier}} +# service: bento-{{tier}}-sumo_syslog +# details: true +# region: "{{region}}" +# register: service_sumo_syslog + +# - name: query sumologic docker service +# ecs_service_info: +# cluster: bento-{{tier}} +# service: bento-{{tier}}-sumo_docker +# details: true +# region: "{{region}}" +# register: service_sumo_docker + +# - name: query newrelic docker service +# ecs_service_info: +# cluster: bento-{{tier}} +# service: bento-{{tier}}-nr_docker +# details: true +# region: "{{region}}" +# register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_url: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/{% endif %}" + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + # task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + # task_sumo_docker_name: "{{task_sumo_docker.family}}" + # task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + +- name: show + debug: + msg: "{{role_arn}}" + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +# - name: update sumologic syslog 
service +# ecs_service: +# state: present +# name: bento-{{tier}}-sumo_syslog +# cluster: bento-{{tier}} +# task_definition: "{{task_sumo_syslog_name}}" +# force_new_deployment: no +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# region: "{{region}}" +# register: service_sumo_syslog_output + +# - name: update sumologic docker service +# ecs_service: +# state: present +# name: bento-{{tier}}-sumo_docker +# cluster: bento-{{tier}} +# task_definition: "{{task_sumo_docker_name}}" +# force_new_deployment: no +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# region: "{{region}}" +# register: service_sumo_docker_output + +# - name: update newrelic docker service +# ecs_service: +# state: present +# name: bento-{{tier}}-nr_docker +# cluster: bento-{{tier}} +# task_definition: "{{task_nr_docker_name}}" +# force_new_deployment: no +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# region: "{{region}}" +# register: service_nr_docker_output + +- name: update backend service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-backend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output + +- name: update frontend service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-frontend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + 
register: service_frontend_output \ No newline at end of file diff --git a/ansible/roles/bento-ins/tasks/main.yml b/ansible/roles/bento-ins/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento-ins/tasks/redis.yml b/ansible/roles/bento-ins/tasks/redis.yml new file mode 100644 index 000000000..46787e625 --- /dev/null +++ b/ansible/roles/bento-ins/tasks/redis.yml @@ -0,0 +1,5 @@ +- name: confirm redis redis_host + debug: + msg: "{{redis_host[tier]}}" +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL ASYNC" | redis-cli -h {{ redis_host[tier]}} -p 6379 -c diff --git a/ansible/roles/bento-ins/templates/env.j2 b/ansible/roles/bento-ins/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/bento-ins/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/bento-ins/templates/nginx-config.yml.j2 b/ansible/roles/bento-ins/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/bento-ins/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/bento-ins/tests/inventory b/ansible/roles/bento-ins/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento-ins/tests/inventory @@ -0,0 +1,2 @@ 
+localhost + diff --git a/ansible/roles/bento-ins/tests/test.yml b/ansible/roles/bento-ins/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento-ins/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento-ins/vars/main.yml b/ansible/roles/bento-ins/vars/main.yml new file mode 100644 index 000000000..91ff784c0 --- /dev/null +++ b/ansible/roles/bento-ins/vars/main.yml @@ -0,0 +1,43 @@ +--- +# vars file for cicd +stack_name: ins +enable_redis: true +platform: aws +redis_host: + dev: "ins-dev-redis-cluster.l5vrvc.0001.use1.cache.amazonaws.com" + qa: "localhost" + perf: "localhost" + icdc: "localhost" + prod: "localhost" +redis_password: "" +redis_port: 6379 +#enable_redis_filter: "{{ lookup('env','ENABLE_REDIS_FILTER') }}" +use_cluster: 'false' +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: bento-{{platform}}-{{tier}} + +#### bento-icdc changes ######################################################################################################### +schema_file: "{% if tier == 'icdc' %}icdc.graphql{% else %}bento-extended.graphql{% endif %}" +################################################################################################################################# + +frontend_version: "{{ lookup('env','FE_VERSION') }}" 
+backend_version: "{{ lookup('env','BE_VERSION') }}" +bento_api_version: "{{ backend_version }}" +backend_frontend_version: "{{frontend_version }}" +backend_content_url: https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{ tier }}/aboutPagesContent.yaml +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +neo4j_bearer: "{{ lookup('env','BEARER') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" + +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/bento/.travis.yml b/ansible/roles/bento/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/bento/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/bento/README.md b/ansible/roles/bento/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/bento/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/bento/defaults/main.yml b/ansible/roles/bento/defaults/main.yml new file mode 100644 index 000000000..8dddb65c6 --- /dev/null +++ b/ansible/roles/bento/defaults/main.yml @@ -0,0 +1,39 @@ +--- +# defaults file for cicd +# project +platform: aws +region: us-east-1 +project: bento +tier: "{{ lookup('env','TIER') }}" +app_name: "{{project}}-{{platform}}-{{tier}}" + +# build +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +frontend_commit_id: "{{ lookup('env','FE_COMMIT') | default('', true) }}" +backend_commit_id: "{{ lookup('env','BE_COMMIT') | default('', true) }}" +frontend_version: "{{ lookup('env','FE_VERSION') }}" +bento_api_version: "{{ lookup('env','BE_VERSION') }}" +backend_content_url: https://raw.githubusercontent.com/CBIIT/bento-frontend/{{ frontend_version }}/src/content/{{ tier }}/aboutPagesContent.yaml +build_number: "{{ lookup('env','BUILD_NUMBER')}}" + +# db +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" + +# redis +enable_redis: false +redis_password: "" +redis_port: 6379 +use_cluster: true + +# elasticsearch +enable_es_filter: true +es_host: "{{ lookup('env','ES_HOST') }}" + +# monitoring agents +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" \ No newline at end of file diff --git a/ansible/roles/bento/files/inject.template.js b/ansible/roles/bento/files/inject.template.js new file mode 100644 index 000000000..302bf3a1a --- /dev/null +++ b/ansible/roles/bento/files/inject.template.js @@ -0,0 +1,12 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: 
'${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', + REACT_APP_FILE_SERVICE_API: '${REACT_APP_FILE_SERVICE_API}', +}; diff --git a/ansible/roles/bento/files/nginx-entrypoint.sh b/ansible/roles/bento/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/bento/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/bento/handlers/main.yml b/ansible/roles/bento/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/bento/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/bento/meta/main.yml b/ansible/roles/bento/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/bento/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - 
CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/bento/tasks/build.yml b/ansible/roles/bento/tasks/build.yml new file mode 100644 index 000000000..90cf985aa --- /dev/null +++ b/ansible/roles/bento/tasks/build.yml @@ -0,0 +1,177 @@ +--- + +############################################################################################################################ + +# Set Environment + +############################################################################################################################ + +- name: get db ip + include_tasks: neo4j-ip.yml + +- name: checkout specified commit - backend + shell: "git checkout {{ backend_commit_id }}" + args: + chdir: "{{ workspace }}" + +- name: checkout specified commit - frontend + shell: "git checkout {{ frontend_commit_id }}" + args: + chdir: "{{ workspace }}/{{ project }}-frontend" + +- name: get backend commit ID + shell: git rev-parse HEAD + args: + chdir: "{{ workspace }}" + register: backend_id + +- name: get frontend commit ID + shell: git rev-parse HEAD + args: + chdir: "{{ workspace }}/{{ project }}-frontend" + register: frontend_id + +- name: echo backend id + debug: + msg: "{{ backend_id.stdout_lines }}" + +- name: echo frontend id + debug: + msg: "{{ frontend_id.stdout_lines }}" + +############################################################################################################################ + +# Backend Build + +############################################################################################################################ + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: "{{workspace}}/src/main/resources/application.properties.j2" + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: create graphql directory in backend + file: + state: directory + path: 
"{{workspace}}/src/main/resources/graphql" + +- name: create yaml directory in backend + file: + state: directory + path: "{{workspace}}/src/main/resources/yaml" + +- name: copy schema from frontend to resources + template: + src: "{{workspace}}/{{ project }}-frontend/graphql/{{ schema_file}}" + dest: "{{workspace}}/src/main/resources/graphql/{{ schema_file}}" + +- name: verify test queries file exists + stat: + path: "{{workspace}}/{{ project }}-frontend/yaml/{{ test_queries_file }}" + register: test_queries + +- name: copy test queries from frontend to resources + template: + src: "{{workspace}}/{{ project }}-frontend/yaml/{{ test_queries_file }}" + dest: "{{workspace}}/src/main/resources/yaml/{{ test_queries_file }}" + when: test_queries.stat.exists + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy Bento-0.0.1.war to ROOT.war + copy: + remote_src: yes + src: "{{workspace}}/target/Bento-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/{{ project }}-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{ project }}-backend + tag: "{{bento_api_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{ project }}-backend image + docker_image: + name: "cbiitssrepo/{{ project }}-backend:{{bento_api_version}}-{{build_number}}" + repository: cbiitssrepo/{{ project }}-backend:latest + force_tag: yes + push: yes + source: local + +############################################################################################################################ + +# Frontend Build + 
+############################################################################################################################ + +- name: remove .env + file: + state: absent + path: "{{workspace}}/{{ project }}-frontend/.env" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx.conf' + dest: '{{workspace}}/{{ project }}-frontend/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/{{ project }}-frontend/nginx-entrypoint.sh" + mode: 0755 + +- name: run npm install in {{workspace}}/{{ project }}-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/{{ project }}-frontend" + loop: + - npm install + - npm run build + +- name: copy env to dist + copy: + src: inject.template.js + dest: "{{workspace}}/{{ project }}-frontend/dist/inject.template.js" + mode: 0755 + +- name: build cbiitssrepo/{{ project }}-frontend image + docker_image: + build: + path: "{{workspace}}/{{ project }}-frontend" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-icdc-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{ project }}-frontend + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{ project }}-frontend image + docker_image: + name: "cbiitssrepo/{{ project }}-frontend:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/{{ project }}-frontend:latest + force_tag: yes + push: yes + source: local \ No newline at end of file diff --git a/ansible/roles/bento/tasks/deploy-backend.yml b/ansible/roles/bento/tasks/deploy-backend.yml new file mode 100644 index 000000000..f818c4bd2 --- /dev/null +++ b/ansible/roles/bento/tasks/deploy-backend.yml @@ -0,0 +1,67 @@ +--- +- name: create task definition + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/bento-backend:{{build_number}}" + portMappings: + - 
containerPort: "8080" + # hostPort: "80" + - name: frontend + essential: true + image: "cbiitssrepo/bento-frontend:{{build_number}}" + portMappings: + - containerPort: "80" + hostPort: "80" + network_mode: bridge + family: ctdc + state: present + region: "{{region}}" + register: task_output + +- name: query task definition + ecs_taskdefinition_info: + task_definition: ctdc + region: "{{region}}" + register: task_output + +- name: query ecs service + ecs_service_info: + cluster: ctdc-ecs + service: ctdc_ecs_service + details: true + region: "{{region}}" + register: service_info + +- name: set facts + set_fact: + revision: "{{task_output.revision}}" + task_name: "{{task_output.family}}" + lb_target_arn: "{{service_info.services[0].loadBalancers[0].targetGroupArn}}" + lb_container_port: "{{service_info.services[0].loadBalancers[0].containerPort}}" + lb_container_name: "{{service_info.services[0].loadBalancers[0].containerName}}" + role_arn: "{{service_info.services[0].roleArn}}" + +- name: update ecs service + ecs_service: + state: present + name: ctdc_ecs_service + cluster: ctdc-ecs + task_definition: "{{task_name}}:{{revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: + - targetGroupArn: "{{lb_target_arn}}" + containerName: "{{lb_container_name}}" + containerPort: "{{ lb_container_port }}" + region: "{{region}}" + register: service_output + +- name: show service service + debug: + msg: "{{service_output}}" \ No newline at end of file diff --git a/ansible/roles/bento/tasks/deploy-dev.yml b/ansible/roles/bento/tasks/deploy-dev.yml new file mode 100644 index 000000000..7f0ccdb04 --- /dev/null +++ b/ansible/roles/bento/tasks/deploy-dev.yml @@ -0,0 +1,359 @@ +--- +############################################################################################################################ + +# Task Definitions + 
+############################################################################################################################
+
+- name: create task definition - backend
+  ecs_taskdefinition:
+    containers:
+    - name: backend
+      essential: true
+      image: "cbiitssrepo/bento-backend:{{build_number}}"
+      environment:
+        - name: NEW_RELIC_LICENSE_KEY
+          value: "{{ newrelic_license_key }}"
+        - name: NEW_RELIC_APP_NAME
+          value: "{{ app_name }}-backend"
+        - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED
+          value: "true"
+        - name: NEW_RELIC_HOST
+          value: "gov-collector.newrelic.com"
+#        - name: NEW_RELIC_LOG_FILE_NAME
+#          value: "STDOUT"
+#        - name: JAVA_OPTS
+#          value: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar"
+#      entryPoint:
+#        - "/bin/ash"
+#        - "-c"
+#        - 'wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && unzip newrelic-java.zip && bin/catalina.sh run'
+      portMappings:
+        - containerPort: "8080"
+          hostPort: "8080"
+      logConfiguration:
+        logDriver: syslog
+        options:
+          syslog-address: tcp://{{ syslog_host }}:514
+          syslog-format: rfc5424
+          tag: "{{ app_name }}-backend"
+    network_mode: bridge
+    family: bento-{{tier}}-backend
+    memory: '512'
+    cpu: '512'
+    state: present
+    region: "{{region}}"
+  register: task_output
+
+- name: create task definition - frontend
+  ecs_taskdefinition:
+    containers:
+    - name: frontend
+      essential: true
+      image: "cbiitssrepo/bento-frontend:{{build_number}}"
+      environment:
+        - name: NEW_RELIC_LICENSE_KEY
+          value: "{{ newrelic_license_key }}"
+        - name: NEW_RELIC_APP_NAME
+          value: "{{ app_name }}-frontend"
+        - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED
+          value: "true"
+        - name: NEW_RELIC_HOST
+          value: "gov-collector.newrelic.com"
+        - name: NEW_RELIC_NO_CONFIG_FILE
+          value: "true"
+      portMappings:
+        - containerPort: "80"
+          hostPort: "80"
+      logConfiguration:
+        logDriver: syslog
+        options:
+          syslog-address: tcp://{{ syslog_host }}:514
+          syslog-format: rfc5424
+          tag: "{{ app_name }}-frontend"
+    
network_mode: bridge + family: bento-{{tier}}-frontend + state: present + memory: '512' + cpu: '512' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo syslog + ecs_taskdefinition: + containers: + - name: sumologic-syslog + essential: true + image: "sumologic/collector:latest-syslog" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-syslog" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + portMappings: + - containerPort: "514" + hostPort: "514" + network_mode: bridge + family: bento-{{tier}}-sumo_syslog + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo docker + ecs_taskdefinition: + containers: + - name: sumologic-docker + essential: true + image: "sumologic/collector:latest" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-docker" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + network_mode: bridge + family: bento-{{tier}}-sumo_docker + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - newrelic docker + ecs_taskdefinition: + containers: + - name: newrelic-docker + essential: true + image: "newrelic/infrastructure:latest" + environment: + - name: NRIA_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NRIA_DISPLAY_NAME + value: "{{ app_name }}-docker" + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + mountPoints: + - 
containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + - containerPath: /host + sourceVolume: docker-host + readOnly: true +# cap_add: +# - SYS_PTRACE + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + - name: docker-host + host: + sourcePath: / + network_mode: bridge + family: bento-{{tier}}-nr_docker + state: present + memory: '128' + cpu: '128' + region: "{{region}}" + register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - bento frontend + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-frontend + region: "{{region}}" + register: task_frontend + +- name: query task definition - bento backend + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-backend + region: "{{region}}" + register: task_backend + +- name: query task definition - sumologic syslog + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-sumo_syslog + region: "{{region}}" + register: task_sumo_syslog + +- name: query task definition - sumologic docker + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-sumo_docker + region: "{{region}}" + register: task_sumo_docker + +- name: query task definition - newrelic docker + ecs_taskdefinition_info: + task_definition: bento-{{tier}}-nr_docker + region: "{{region}}" + register: task_nr_docker + +############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-backend + details: 
true + region: "{{region}}" + register: service_backend + +- name: query ecs service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-frontend + details: true + region: "{{region}}" + register: service_frontend + +- name: query sumologic syslog service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-sumo_syslog + details: true + region: "{{region}}" + register: service_sumo_syslog + +- name: query sumologic docker service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-sumo_docker + details: true + region: "{{region}}" + register: service_sumo_docker + +- name: query newrelic docker service + ecs_service_info: + cluster: bento-{{tier}} + service: bento-{{tier}}-nr_docker + details: true + region: "{{region}}" + register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + task_sumo_docker_name: "{{task_sumo_docker.family}}" + task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update sumologic syslog service + ecs_service: + state: present + name: bento-{{tier}}-sumo_syslog + cluster: bento-{{tier}} + task_definition: "{{task_sumo_syslog_name}}" + force_new_deployment: 
yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_syslog_output + +- name: update sumologic docker service + ecs_service: + state: present + name: bento-{{tier}}-sumo_docker + cluster: bento-{{tier}} + task_definition: "{{task_sumo_docker_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_docker_output + +- name: update newrelic docker service + ecs_service: + state: present + name: bento-{{tier}}-nr_docker + cluster: bento-{{tier}} + task_definition: "{{task_nr_docker_name}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_nr_docker_output + +- name: update backend service + ecs_service: + state: present + name: bento-{{tier}}-backend + cluster: bento-{{tier}} + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output + +- name: update frontend service + ecs_service: + state: present + name: bento-{{tier}}-frontend + cluster: bento-{{tier}} + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +#- name: show lb_backend +# debug: +# msg: "{{lb_backend}}" diff --git a/ansible/roles/bento/tasks/deploy.yml b/ansible/roles/bento/tasks/deploy.yml new file mode 100644 index 000000000..385493ee6 --- /dev/null +++ 
b/ansible/roles/bento/tasks/deploy.yml @@ -0,0 +1,417 @@ +--- +- name: get db ip + include_tasks: neo4j-ip.yml + +############################################################################################################################ + +# Task Definitions + +############################################################################################################################ + +- name: create task definition - {{ project }}-backend + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/{{ project }}-backend:{{bento_api_version}}" + environment: + - name: NEO4J_URL + value: bolt://{{ neo4j_ip }}:7687 + - name: NEO4J_USER + value: "{{ neo4j_user }}" + - name: NEO4J_PASSWORD + value: "{{ neo4j_password }}" + - name: NEO4J_GRAPHQL_ENDPOINT + value: http://{{ neo4j_ip }}:7474/graphql/ + - name: BENTO_API_VERSION + value: "{{bento_api_version}}" + - name: ES_HOST + value: "{{es_host}}" + - name: REDIS_ENABLE + value: "{{ enable_redis }}" + - name: REDIS_USE_CLUSTER + value: "{{ use_cluster }}" + - name: REDIS_HOST + value: "{{ redis_host[tier] }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: REDIS_FILTER_ENABLE + value: false + - name: ES_FILTER_ENABLED + value: "{{ enable_es_filter }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-backend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_LOG_FILE_NAME + value: "STDOUT" + - name: NEW_RELIC_LABELS + value: "Project:{{ project }};Environment:{{ tier }}" + - name: JAVA_OPTS + value: "-javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + entryPoint: + - "/bin/ash" + - "-c" + - 'wget "https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" -O newrelic-java.zip && rm -rf newrelic && unzip -o newrelic-java.zip && bin/catalina.sh run' + portMappings: + - 
containerPort: "8080" + hostPort: "8080" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-backend" + syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{ project }}-{{tier}}-backend" + memory: '2048' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + +- name: create task definition - {{ project }}-frontend + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "cbiitssrepo/{{ project }}-frontend:{{frontend_version}}" + environment: + - name: REACT_APP_BACKEND_API + value: "{% if tier == 'prod' %}https://{{project}}.bento-tools.org/v1/graphql/{% else %}https://{{project}}-{{ tier }}.bento-tools.org/v1/graphql/{% endif %}" + - name: REACT_APP_FILE_SERVICE_API + value: "{% if tier == 'prod' %}https://{{project}}.bento-tools.org/api/files/{% else %}https://{{project}}-{{ tier }}.bento-tools.org/api/files/{% endif %}" + - name: REACT_APP_BE_VERSION + value: "{{bento_api_version}}" + - name: REACT_APP_FE_VERSION + value: "{{frontend_version}}" + - name: REACT_APP_ABOUT_CONTENT_URL + value: "{{about_content_url}}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-frontend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: true + - name: NEW_RELIC_LABELS + value: "Project:{{ project }};Environment:{{ tier }}" + portMappings: + - containerPort: "80" + hostPort: "80" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-frontend" + syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{ project }}-{{tier}}-frontend" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo syslog + 
ecs_taskdefinition: + containers: + - name: sumologic-syslog + essential: true + image: "sumologic/collector:latest-syslog" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-syslog" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + portMappings: + - containerPort: "514" + hostPort: "514" + network_mode: bridge + family: "{{ project }}-{{tier}}-sumo_syslog" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo docker + ecs_taskdefinition: + containers: + - name: sumologic-docker + essential: true + image: "sumologic/collector:latest" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-docker" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + network_mode: bridge + family: "{{ project }}-{{tier}}-sumo_docker" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - newrelic docker + ecs_taskdefinition: + containers: + - name: newrelic-docker + essential: true + image: "newrelic/infrastructure-bundle:latest" + environment: + - name: NRIA_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NRIA_DISPLAY_NAME + value: "{{ app_name }}-docker" + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_LABELS + value: "Project:{{ project }};Environment:{{ tier }}" + - name: STATUS_URL + value: "{% if tier == 'prod' %}https://bento-tools.org/nginx_status{% else 
%}https://bento-{{ tier }}.bento-tools.org/nginx_status{% endif %}" + entryPoint: + - "/bin/ash" + - "-c" + - 'echo -e "integrations:\n - name: nri-nginx\n env:\n REMOTE_MONITORING: true\n METRICS: 1" > /etc/newrelic-infra/integrations.d/nginx-config.yml && /sbin/tini -- /usr/bin/newrelic-infra-service' + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + - containerPath: /host + sourceVolume: docker-host + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + - name: docker-host + host: + sourcePath: / + network_mode: bridge + family: "{{ project }}-{{tier}}-nr_docker" + state: present + memory: '128' + cpu: '128' + region: "{{region}}" + register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - {{ project }}-{{tier}}-frontend + ecs_taskdefinition_info: + task_definition: "{{ project }}-{{tier}}-frontend" + region: "{{region}}" + register: task_frontend + +- name: query task definition - {{ project }}-{{tier}}-backend + ecs_taskdefinition_info: + task_definition: "{{ project }}-{{tier}}-backend" + region: "{{region}}" + register: task_backend + +- name: query task definition - sumologic syslog + ecs_taskdefinition_info: + task_definition: "{{ project }}-{{tier}}-sumo_syslog" + region: "{{region}}" + register: task_sumo_syslog + +- name: query task definition - sumologic docker + ecs_taskdefinition_info: + task_definition: "{{ project }}-{{tier}}-sumo_docker" + region: "{{region}}" + register: task_sumo_docker + +- name: query task definition - newrelic docker + ecs_taskdefinition_info: + task_definition: "{{ project }}-{{tier}}-nr_docker" + region: "{{region}}" + register: task_nr_docker + 
+############################################################################################################################ + +# Service Queries + +############################################################################################################################ + +- name: query {{ project }}-{{tier}}-backend service + ecs_service_info: + cluster: "{{ project }}-{{tier}}" + service: "{{ project }}-{{tier}}-backend" + details: true + region: "{{region}}" + register: service_backend + +- name: query {{ project }}-{{tier}}-frontend service + ecs_service_info: + cluster: "{{ project }}-{{tier}}" + service: "{{ project }}-{{tier}}-frontend" + details: true + region: "{{region}}" + register: service_frontend + +- name: query sumologic syslog service + ecs_service_info: + cluster: "{{ project }}-{{tier}}" + service: "{{ project }}-{{tier}}-sumo_syslog" + details: true + region: "{{region}}" + register: service_sumo_syslog + +- name: query sumologic docker service + ecs_service_info: + cluster: "{{ project }}-{{tier}}" + service: "{{ project }}-{{tier}}-sumo_docker" + details: true + region: "{{region}}" + register: service_sumo_docker + +- name: query newrelic docker service + ecs_service_info: + cluster: "{{ project }}-{{tier}}" + service: "{{ project }}-{{tier}}-nr_docker" + details: true + region: "{{region}}" + register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_url: "{% if tier == 'prod' %}https://{{project}}.bento-tools.org/{% else %}https://{{project}}-{{ tier }}.bento-tools.org/{% endif %}" + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + task_sumo_docker_name: "{{task_sumo_docker.family}}" + 
task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update sumologic syslog service + ecs_service: + state: present + name: "{{ project }}-{{tier}}-sumo_syslog" + cluster: "{{ project }}-{{tier}}" + task_definition: "{{task_sumo_syslog_name}}" + force_new_deployment: no + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_syslog_output + +- name: update sumologic docker service + ecs_service: + state: present + name: "{{ project }}-{{tier}}-sumo_docker" + cluster: "{{ project }}-{{tier}}" + task_definition: "{{task_sumo_docker_name}}" + force_new_deployment: no + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_docker_output + +- name: update newrelic docker service + ecs_service: + state: present + name: "{{ project }}-{{tier}}-nr_docker" + cluster: "{{ project }}-{{tier}}" + task_definition: "{{task_nr_docker_name}}" + force_new_deployment: no + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_nr_docker_output + +- name: update {{ project }}-{{tier}}-backend service + ecs_service: + state: present + name: "{{ project }}-{{tier}}-backend" + cluster: "{{ project }}-{{tier}}" + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + 
minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output + +- name: update {{ project }}-{{tier}}-frontend service + ecs_service: + state: present + name: "{{ project }}-{{tier}}-frontend" + cluster: "{{ project }}-{{tier}}" + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +#- name: Pause for 20 seconds to allow updates to take effect +# pause: +# seconds: 20 + +#- name: "wait for {{ frontend_url }} to become available" +# uri: +# url: "{{ frontend_url }}" +# follow_redirects: none +# method: GET +# register: _result +# until: ('status' in _result) and (_result.status == 200) +# retries: 100 +# delay: 10 \ No newline at end of file diff --git a/ansible/roles/bento/tasks/main.yml b/ansible/roles/bento/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/bento/tasks/redis.yml b/ansible/roles/bento/tasks/redis.yml new file mode 100644 index 000000000..46787e625 --- /dev/null +++ b/ansible/roles/bento/tasks/redis.yml @@ -0,0 +1,5 @@ +- name: confirm redis redis_host + debug: + msg: "{{redis_host[tier]}}" +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL ASYNC" | redis-cli -h {{ redis_host[tier]}} -p 6379 -c diff --git a/ansible/roles/bento/templates/env.j2 b/ansible/roles/bento/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/bento/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} 
+REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/bento/templates/nginx-config.yml.j2 b/ansible/roles/bento/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/bento/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/bento/tests/inventory b/ansible/roles/bento/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/bento/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/bento/tests/test.yml b/ansible/roles/bento/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/bento/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/bento/vars/bento.yml b/ansible/roles/bento/vars/bento.yml new file mode 100644 index 000000000..857ab3b4f --- /dev/null +++ b/ansible/roles/bento/vars/bento.yml @@ -0,0 +1,13 @@ +--- +# vars file for bento +redis_host: + dev: "bento-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + qa: "bento-qa-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + perf: "bento-perf-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + icdc: "bento-icdc-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + ctdc: "ctdc-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + prod: 
"bento-prod-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + +schema_file: "bento-extended.graphql" + +about_content_url: "https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{ tier }}/aboutPagesContent.yaml" \ No newline at end of file diff --git a/ansible/roles/bento/vars/c3dc.yml b/ansible/roles/bento/vars/c3dc.yml new file mode 100644 index 000000000..9237822fb --- /dev/null +++ b/ansible/roles/bento/vars/c3dc.yml @@ -0,0 +1,8 @@ +--- +# vars file for gmb +redis_host: + dev: "c3dc-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + +schema_file: "bento-c3dc.graphql" + +about_content_url: "https://raw.githubusercontent.com/CBIIT/bento-c3dc-frontend/{{ frontend_version }}/src/content/{{tier}}/aboutPagesContent.yaml" \ No newline at end of file diff --git a/ansible/roles/bento/vars/gmb.yml b/ansible/roles/bento/vars/gmb.yml new file mode 100644 index 000000000..676d48868 --- /dev/null +++ b/ansible/roles/bento/vars/gmb.yml @@ -0,0 +1,8 @@ +--- +# vars file for gmb +redis_host: + dev: "gmb-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + +schema_file: "bento-gmb.graphql" + +about_content_url: "https://raw.githubusercontent.com/CBIIT/bento-gmb-frontend/develop/src/content/{{tier}}/aboutPagesContent.yaml" \ No newline at end of file diff --git a/ansible/roles/bento/vars/ins.yml b/ansible/roles/bento/vars/ins.yml new file mode 100644 index 000000000..023463a6f --- /dev/null +++ b/ansible/roles/bento/vars/ins.yml @@ -0,0 +1,10 @@ +--- +# vars file for ins +redis_host: + dev: "ins-dev-redis-cluster.l5vrvc.0001.use1.cache.amazonaws.com" + +schema_file: "bento-extended.graphql" + +about_content_url: "https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{ tier }}/aboutPagesContent.yaml" + + diff --git a/ansible/roles/bento/vars/main.yml.bak b/ansible/roles/bento/vars/main.yml.bak new file mode 100644 index 000000000..9758ae456 --- /dev/null +++ b/ansible/roles/bento/vars/main.yml.bak @@ -0,0 
+1,63 @@ +--- +# vars file for cicd +#enable_redis: true +platform: aws +redis_tier_name: "{{project}}-{{tier}}" +redis_host: + bento-dev: "bento-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + bento-qa: "bento-qa-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + bento-perf: "bento-perf-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + bento-icdc: "bento-icdc-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + bento-ctdc: "ctdc-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + bento-prod: "bento-prod-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + gmb-dev: "gmb-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + ins-dev: "ins-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + +redis_password: "" +redis_port: 6379 +use_cluster: 'true' + +tier: "{{ lookup('env','TIER') }}" +es_host: "{{ lookup('env','ES_HOST') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: "{{project}}-{{platform}}-{{tier}}" + +#### bento-icdc changes ################################################################################################################################################ +#schema_file: "{% if tier == 'icdc' %}icdc.graphql{% elif project == 'ctdc' %}ctdc.graphql{% else %}bento-extended.graphql{% endif %}" 
+######################################################################################################################################################################## +schemas: + icdc: "icdc.graphql" + bento: "bento-extended.graphql" + gmb: "bento-gmb.graphql" + ins: "bento-extended.graphql" + ccdi: "bento-extended.graphql" + +schema_file: "{{schemas[project]}}" + +frontend_commit_id: "{{ lookup('env','FE_COMMIT') | default('', true) }}" +backend_commit_id: "{{ lookup('env','BE_COMMIT') | default('', true) }}" +frontend_version: "{{ lookup('env','FE_VERSION') }}" +backend_version: "{{ lookup('env','BE_VERSION') }}" +bento_api_version: "{{ backend_version }}" +backend_frontend_version: "{{frontend_version }}" +backend_content_url: https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{ tier }}/aboutPagesContent.yaml +# neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +neo4j_bearer: "{{ lookup('env','BEARER') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" +bearer: "{{ lookup('env','BEARER') }}" + +about_content_url: + gmb: https://raw.githubusercontent.com/CBIIT/bento-gmb-frontend/develop/src/content/{{tier}}/aboutPagesContent.yaml + bento: https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{ tier }}/aboutPagesContent.yaml + ctdc: https://raw.githubusercontent.com/CBIIT/bento-ctdc-frontend/develop/src/content/pre-prod/aboutPagesContent.yml +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/build-image-bento/README.md b/ansible/roles/build-image-bento/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/build-image-bento/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/build-image-bento/defaults/main.yml b/ansible/roles/build-image-bento/defaults/main.yml new file mode 100644 index 000000000..573b7b686 --- /dev/null +++ b/ansible/roles/build-image-bento/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for build-image \ No newline at end of file diff --git a/ansible/roles/build-image-bento/handlers/main.yml b/ansible/roles/build-image-bento/handlers/main.yml new file mode 100644 index 000000000..fecba5317 --- /dev/null +++ b/ansible/roles/build-image-bento/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for build-image \ No newline at end of file diff --git a/ansible/roles/build-image-bento/meta/main.yml b/ansible/roles/build-image-bento/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/ansible/roles/build-image-bento/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/build-image-bento/tasks/bento.yml b/ansible/roles/build-image-bento/tasks/bento.yml new file mode 100644 index 000000000..92e3d1d82 --- /dev/null +++ b/ansible/roles/build-image-bento/tasks/bento.yml @@ -0,0 +1,46 @@ +--- +# tasks file for build-image-bento +- name: list all dockerfiles in backend directory + find: + path: ../bento/backend + patterns: 'Dockerfile' + file_type: file + register: backend_dockerfile + +- name: list all dockerfiles in frontend directory + find: + path: ../bento/frontend + patterns: 'Dockerfile' + file_type: file + register: frontend_dockerfile + +- name: set name of backend dockerfiles + set_fact: + backendfiles: "{{ backend_dockerfile.files | json_query('[*].path')}}" + +- name: set name of frontend dockerfiles + set_fact: + frontendfiles: "{{ frontend_dockerfile.files | json_query('[*].path')}}" + +- name: merge file lists + set_fact: + dockerfiles: "{{ backendfiles + frontendfiles }}" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build all the docker images + docker_image: + build: + path: "{{ item | dirname }}" + dockerfile: "Dockerfile" + pull: 
yes + nocache: yes + name: "{{ dockerhub_path }}/bento-{{ item | dirname | basename }}" + tag: "release" + push: yes + force_source: yes + source: build + loop: "{{ dockerfiles }}" \ No newline at end of file diff --git a/ansible/roles/build-image-bento/tasks/icdc.yml b/ansible/roles/build-image-bento/tasks/icdc.yml new file mode 100644 index 000000000..085cdf30a --- /dev/null +++ b/ansible/roles/build-image-bento/tasks/icdc.yml @@ -0,0 +1,84 @@ +--- +# tasks file for build-image-bento +- name: clean the workspace of dangling images and volumes + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} system prune -a --volumes + +- name: list all dockerfiles in backend directory + find: + path: ../bento/backend + patterns: '*-icdc-*' + file_type: file + register: backend_dockerfile + +- name: list all dockerfiles in frontend directory + find: + path: ../bento/frontend + file_type: file + register: frontend_dockerfile + +- name: set name of backend dockerfiles + set_fact: + backendfiles: "{{ backend_dockerfile.files | json_query('[*].path')}}" + +- name: set name of frontend dockerfiles + set_fact: + frontendfiles: "{{ frontend_dockerfile.files | json_query('[*].path')}}" + +- name: merge file lists + set_fact: + dockerfiles: "{{ backendfiles + frontendfiles }}" + +- name: Log into DockerHub + docker_login: + username: "{{ item.value.username }}" + password: "{{ item.value.password }}" + registry: "{{item.value.registry }}" + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + docker_host: "{{docker_host}}" + loop: "{{ image_location|dict2items }}" + +- name: build all the docker images + docker_image: + build: + path: "{{ item | dirname }}" + dockerfile: 
"Dockerfile" + nocache: yes + force: yes + name: "{{ image_location.dockerhub.path }}/bento-{{ item | dirname | basename }}" + tag: "release" + push: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + docker_host: "{{docker_host}}" + repository: "{{ image_location.dockerhub.registry }}/{{ image_location.dockerhub.path }}/bento-{{ item | dirname | basename }}" + loop: "{{ dockerfiles }}" + +- name: list docker images + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} images -q + register: docker_img + +- name: remove all docker images + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} rmi {{item}} -f + loop: + "{{ docker_img.stdout_lines }}" diff --git a/ansible/roles/build-image-bento/tasks/main.yml b/ansible/roles/build-image-bento/tasks/main.yml new file mode 100644 index 000000000..4784a24ac --- /dev/null +++ b/ansible/roles/build-image-bento/tasks/main.yml @@ -0,0 +1,83 @@ +--- +# tasks file for build-image-bento +- name: clean the workspace of dangling images and volumes + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} system prune -a --volumes + +- name: list all dockerfiles in backend directory + find: + path: ../bento/backend + file_type: file + register: backend_dockerfile + +- name: list all dockerfiles in frontend directory + find: + path: 
../bento/frontend + file_type: file + register: frontend_dockerfile + +- name: set name of backend dockerfiles + set_fact: + backendfiles: "{{ backend_dockerfile.files | json_query('[*].path')}}" + +- name: set name of frontend dockerfiles + set_fact: + frontendfiles: "{{ frontend_dockerfile.files | json_query('[*].path')}}" + +- name: merge file lists + set_fact: + dockerfiles: "{{ backendfiles + frontendfiles }}" + +- name: Log into DockerHub + docker_login: + username: "{{ item.value.username }}" + password: "{{ item.value.password }}" + registry: "{{item.value.registry }}" + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + docker_host: "{{docker_host}}" + loop: "{{ image_location|dict2items }}" + +- name: build all the docker images + docker_image: + build: + path: "{{ item | dirname }}" + dockerfile: "Dockerfile" + nocache: yes + force: yes + name: "{{ image_location.dockerhub.path }}/bento-{{ item | dirname | basename }}" + tag: "release" + push: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + docker_host: "{{docker_host}}" + repository: "{{ image_location.dockerhub.registry }}/{{ image_location.dockerhub.path }}/bento-{{ item | dirname | basename }}" + loop: "{{ dockerfiles }}" + +- name: list docker images + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} images -q + register: docker_img + +- name: remove all docker images + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + 
--tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} rmi {{item}} -f + loop: + "{{ docker_img.stdout_lines }}" diff --git a/ansible/roles/build-image-bento/tests/inventory b/ansible/roles/build-image-bento/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/build-image-bento/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/build-image-bento/tests/test.yml b/ansible/roles/build-image-bento/tests/test.yml new file mode 100644 index 000000000..7712c71d0 --- /dev/null +++ b/ansible/roles/build-image-bento/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - build-image-bento \ No newline at end of file diff --git a/ansible/roles/build-image-bento/vars/bento.yml b/ansible/roles/build-image-bento/vars/bento.yml new file mode 100644 index 000000000..af8244810 --- /dev/null +++ b/ansible/roles/build-image-bento/vars/bento.yml @@ -0,0 +1,5 @@ +--- +# vars file for build-image +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" \ No newline at end of file diff --git a/ansible/roles/build-image-bento/vars/main.yml b/ansible/roles/build-image-bento/vars/main.yml new file mode 100644 index 000000000..bbf970edc --- /dev/null +++ b/ansible/roles/build-image-bento/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for build-image \ No newline at end of file diff --git a/ansible/roles/build-image/README.md b/ansible/roles/build-image/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/build-image/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/build-image/defaults/main.yml b/ansible/roles/build-image/defaults/main.yml new file mode 100644 index 000000000..573b7b686 --- /dev/null +++ b/ansible/roles/build-image/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for build-image \ No newline at end of file diff --git a/ansible/roles/build-image/handlers/main.yml b/ansible/roles/build-image/handlers/main.yml new file mode 100644 index 000000000..fecba5317 --- /dev/null +++ b/ansible/roles/build-image/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for build-image \ No newline at end of file diff --git a/ansible/roles/build-image/meta/main.yml b/ansible/roles/build-image/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/ansible/roles/build-image/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/build-image/tasks/main.yml b/ansible/roles/build-image/tasks/main.yml new file mode 100644 index 000000000..7f31c8b94 --- /dev/null +++ b/ansible/roles/build-image/tasks/main.yml @@ -0,0 +1,95 @@ +--- +# tasks file for build-image +- name: clean the workspace of dangling images and volumes + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} system prune -a --volumes + + +- name: list all dockerfiles in docker directory + find: + #path: ../docker + path: ../bento/icdc + file_type: file + register: list_file + +- name: set name of dockerfiles + set_fact: + dockerfiles: "{{ list_file.files | json_query('[*].path')}}" + +- name: Log into DockerHub + docker_login: + username: "{{ item.value.username }}" + password: "{{ item.value.password }}" + registry: "{{item.value.registry }}" + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + docker_host: "{{docker_host}}" + loop: "{{ image_location|dict2items }}" + + + +- name: build all the docker images + docker_image: + build: + #path: ../docker + path: ../bento/icdc + dockerfile: "{{ item.1 | basename}}" + nocache: yes + force: yes + name: "{{ item.0.value.path }}/{{ item.1 | basename }}" + tag: latest + push: yes + source: 
build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + docker_host: "{{docker_host}}" + repository: "{{ item.0.value.registry }}/{{ item.0.value.path }}/{{ item.1 | basename }}" + with_nested: + - "{{ image_location|dict2items }}" + - "{{ dockerfiles }}" + +# loop: +# "{{dockerfiles}}" + +# - name: clean up the built image +# docker_image: +# state: absent +# force: yes +# tls: yes +# ca_cert: /local/home/commonsdocker/.docker/ca.pem +# client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem +# client_key: /local/home/commonsdocker/.docker/jenkinskey.pem +# docker_host: "{{docker_host}}" +# #repository: "{{ item.0.value.registry }}" +# # name: "{{ item.0.value.path }}/{{ item.1 | basename }}" +# name: "{{ item.0.value.registry }}/{{ item.0.value.path }}/{{ item.1 | basename }}" +# with_nested: +# - "{{ image_location|dict2items }}" +# - "{{ dockerfiles }}" + +- name: list docker images + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} images -q + register: docker_img + +- name: remove all docker images + command: > + docker + --tlsverify --tlscacert=/local/home/commonsdocker/.docker/ca.pem + --tlscert=/local/home/commonsdocker/.docker/jenkinscert.pem + --tlskey=/local/home/commonsdocker/.docker/jenkinskey.pem + -H={{ docker_host }} rmi {{item}} -f + loop: + "{{ docker_img.stdout_lines }}" diff --git a/ansible/roles/build-image/tests/inventory b/ansible/roles/build-image/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/build-image/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/build-image/tests/test.yml b/ansible/roles/build-image/tests/test.yml new file mode 
100644 index 000000000..9a7edf958 --- /dev/null +++ b/ansible/roles/build-image/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - build-image \ No newline at end of file diff --git a/ansible/roles/build-image/vars/main.yml b/ansible/roles/build-image/vars/main.yml new file mode 100644 index 000000000..bbf970edc --- /dev/null +++ b/ansible/roles/build-image/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for build-image \ No newline at end of file diff --git a/ansible/roles/ccdc-etl/files/inject.template.js b/ansible/roles/ccdc-etl/files/inject.template.js new file mode 100644 index 000000000..cb52bc80c --- /dev/null +++ b/ansible/roles/ccdc-etl/files/inject.template.js @@ -0,0 +1,4 @@ +window.injectedEnv = { + REACT_APP_ENVIRONMENT: '${REACT_APP_ENVIRONMENT}', + REACT_APP_API_URL: '${REACT_APP_API_URL}', +}; diff --git a/ansible/roles/ccdc-etl/tasks/build.yml b/ansible/roles/ccdc-etl/tasks/build.yml new file mode 100644 index 000000000..59c5f07f6 --- /dev/null +++ b/ansible/roles/ccdc-etl/tasks/build.yml @@ -0,0 +1,135 @@ +--- + +- name: run npm install in {{workspace}}/{{etl}} + command: "{{item}}" + args: + chdir: "{{workspace}}/{{etl}}" + loop: + - npm install + +- name: create digest_file_folder + file: + path: "{{workspace}}/{{etl}}/{{ digest_file_folder }}" + state: directory + +- name: Get list of files from S3 + aws_s3: + mode: list + bucket: "{{ aws_storage_bucket_name }}" + prefix: "{{ digest_file_folder }}/" + marker: "{{ digest_file_folder }}/" + register: s3_bucket_items + +- name: Print s3_bucket_items + debug: + msg: "s3_bucket_items Version: {{ s3_bucket_items.s3_keys }}" + +- name: Download files from S3 + aws_s3: + mode: get + bucket: "{{ aws_storage_bucket_name }}" + object: "{{ item }}" + dest: "{{workspace}}/{{etl}}/{{ digest_file_folder }}/{{ item|basename }}" + with_items: "{{ s3_bucket_items.s3_keys }}" + +# - name: build cbiitssrepo/{{etl | lower}} image +# docker_image: +# build: +# path: 
"{{workspace}}/{{etl}}" +# pull: yes +# nocache: yes +# name: cbiitssrepo/{{etl | lower}} +# tag: "{{etl_version}}-{{build_number}}" +# push: yes +# force_source: yes +# source: build + +# - name: Add tag latest to cbiitssrepo/{{ etl | lower}} image +# docker_image: +# name: "cbiitssrepo/{{ etl | lower}}:{{etl_version}}-{{build_number}}" +# repository: cbiitssrepo/{{ etl | lower}}:latest +# force_tag: yes +# push: yes +# source: local + +# - name: set environment variables required for js file execution +# shell: +# environment: +# REACT_APP_BACKEND_API: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/service/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/service/{% endif %}" +# REACT_APP_ENVIRONMENT: "{{tier}}" +# RDB_HOST : "{{ rds_host }}" +# RDB_PORT : "{{ rds_port }}" +# RDB_USER : "{{ rds_user }}" +# RDB_PASSWORD : "{{ rds_password }}" +# ES_HOST : "{{ es_host }}" +# ES_PORT : "{{ es_port }}" +# DIGEST_FILE_FOLDER : "{{ digest_file_folder }}" + + +# - name: run npm install in {{workspace}}/{{ etl }} +# command: "{{item}}" +# args: +# chdir: "{{ workspace }}/{{ etl }}" +# loop: +# - npm install +# - npm run build + + +- name: start the application. Run dropTable.js + shell: + "node dropTable.js" #"docker run --rm -d cbiitssrepo/ccdc-etl:latest ./dropTable.js" # + args: + chdir: "{{ workspace }}/{{ etl }}" + environment: + RDB_HOST : "{{ rds_host }}" + RDB_PORT : "{{ rds_port }}" + RDB_USER : "{{ rds_user }}" + RDB_PASSWORD : "{{ rds_password }}" + ES_HOST : "{{ es_host }}" + ES_PORT : "{{ es_port }}" + DIGEST_FILE_FOLDER : "{{ digest_file_folder }}" + when: + deployment_type == "re-deployment" + register: drop_table + +- name: Print drop_table + debug: + msg: "drop_table output: {{ drop_table }}" + +- name: start the application. 
Run createTable.js + shell: + "node createTable.js" #docker run --rm -d cbiitssrepo/ccdc-etl:latest ./createTable.js" #"node createTable.js + args: + chdir: "{{ workspace }}/{{ etl }}" + environment: + RDB_HOST : "{{ rds_host }}" + RDB_PORT : "{{ rds_port }}" + RDB_USER : "{{ rds_user }}" + RDB_PASSWORD : "{{ rds_password }}" + ES_HOST : "{{ es_host }}" + ES_PORT : "{{ es_port }}" + DIGEST_FILE_FOLDER : "{{ digest_file_folder }}" + register: create_table + +- name: Print create_table + debug: + msg: "create_table output: {{ create_table }}" + +- name: start the application. Run index.js + shell: + "node index.js" #"docker run --rm -d cbiitssrepo/ccdc-etl:latest ./index.js" #"node index.js" + environment: + RDB_HOST : "{{ rds_host }}" + RDB_PORT : "{{ rds_port }}" + RDB_USER : "{{ rds_user }}" + RDB_PASSWORD : "{{ rds_password }}" + ES_HOST : "{{ es_host }}" + ES_PORT : "{{ es_port }}" + DIGEST_FILE_FOLDER : "{{ digest_file_folder }}" + args: + chdir: "{{ workspace }}/{{ etl }}" + register: loading + +- name: Print loading + debug: + msg: "loading output: {{ loading }}" diff --git a/ansible/roles/ccdc-etl/vars/main.yml b/ansible/roles/ccdc-etl/vars/main.yml new file mode 100644 index 000000000..ad07c4cd6 --- /dev/null +++ b/ansible/roles/ccdc-etl/vars/main.yml @@ -0,0 +1,46 @@ +--- +# vars file for cicd +stack_name: ccdc +platform: aws +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: "{{stack_name}}-{{platform}}-{{tier}}" +etl: CCDC-ETL 
+etl_version: "{{ lookup('env','ETL_VERSION') }}" +region: us-east-1 +rds_host: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-rds-host', region='us-east-1' ) }}" +rds_port: "{{ lookup('aws_ssm', 'rds_port', region='us-east-1' ) }}" +rds_user: "{{ lookup('aws_ssm', 'rds_user', region='us-east-1' ) }}" +rds_password: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-rds-password', region='us-east-1' ) }}" +es_host: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-es-host', region='us-east-1' ) }}" +es_port: "{{ lookup('aws_ssm', 'es_port', region='us-east-1' ) }}" +digest_file_folder: "{{ lookup('env','S3_FOLDER') }}" +aws_storage_bucket_name : ccdc-etl-digest +deployment_type: "{{ lookup('env','DEPLOYMENT_TYPE') }}" + +# environment_variables: +# - key: REACT_APP_BACKEND_API +# value : "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/service/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/service/{% endif %}" +# - key: REACT_APP_ENVIRONMENT +# value : "{{tier}}" +# - key: RDB_HOST +# value : "{{ rds_host }}" +# - key: RDB_PORT +# value : "{{ rds_port }}" +# - key: RDB_USER +# value : "{{ rds_user }}" +# - key: RDB_PASSWORD +# value : "{{ rds_password }}" +# - key: ES_HOST +# value : "{{ es_host }}" +# - key: ES_PORT +# value : "{{ es_port }}" +# - key: DIGEST_FILE_FOLDER +# value : "{{ digest_file_folder }}" \ No newline at end of file diff --git a/ansible/roles/ccdc/.travis.yml b/ansible/roles/ccdc/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/ccdc/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook 
tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/ccdc/README.md b/ansible/roles/ccdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/ccdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/ccdc/defaults/main.yml b/ansible/roles/ccdc/defaults/main.yml new file mode 100644 index 000000000..cd63f0d9b --- /dev/null +++ b/ansible/roles/ccdc/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +prod: prod diff --git a/ansible/roles/ccdc/files/inject.template.js b/ansible/roles/ccdc/files/inject.template.js new file mode 100644 index 000000000..cb52bc80c --- /dev/null +++ b/ansible/roles/ccdc/files/inject.template.js @@ -0,0 +1,4 @@ +window.injectedEnv = { + REACT_APP_ENVIRONMENT: '${REACT_APP_ENVIRONMENT}', + REACT_APP_API_URL: '${REACT_APP_API_URL}', +}; diff --git a/ansible/roles/ccdc/files/nginx-entrypoint.sh b/ansible/roles/ccdc/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/ccdc/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/ccdc/handlers/main.yml b/ansible/roles/ccdc/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/ccdc/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/ccdc/meta/main.yml b/ansible/roles/ccdc/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/ccdc/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - 
GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/ccdc/tasks/build.yml b/ansible/roles/ccdc/tasks/build.yml new file mode 100644 index 000000000..75ce8c3ba --- /dev/null +++ b/ansible/roles/ccdc/tasks/build.yml @@ -0,0 +1,190 @@ +--- + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx.conf' + dest: '{{workspace}}/{{project}}/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/{{project}}/nginx-entrypoint.sh" + mode: 0755 + +- name: run npm install in {{workspace}}/{{project}} + command: "{{item}}" + args: + chdir: "{{workspace}}/{{project}}" + loop: + - npm install + - npm run build + +- name: rename build to dist + command: mv {{workspace}}/{{project}}/build {{workspace}}/{{project}}/dist + +- name: copy env to project + copy: + src: inject.template.js + dest: "{{workspace}}/{{project}}/dist/inject.template.js" + mode: 0755 + +- name: build cbiitssrepo/{{project | lower}} image + docker_image: + build: + path: "{{workspace}}/{{project}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/ccdc-frontend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{project | lower}} + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{ project | lower }} image + docker_image: + name: "cbiitssrepo/{{ project | lower }}:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/{{ project | lower }}:latest + force_tag: yes + push: yes + source: local + + +############################################################################################################################ + +# Backend Build + +############################################################################################################################ + + +# - name: 
create service directory in backend +# file: +# state: directory +# path: "{{workspace}}/src/main/resources/service" + +# - name: create yaml directory in backend +# file: +# state: directory +# path: "{{workspace}}/src/main/resources/yaml" + +# - name: copy schema from frontend to resources +# template: +# src: "{{workspace}}/{{ project }}-frontend/graphql/{{ schema_file}}" +# dest: "{{workspace}}/src/main/resources/graphql/{{ schema_file}}" + +# - name: verify test queries file exists +# stat: +# path: "{{workspace}}/{{ project }}-frontend/yaml/{{ test_queries_file }}" +# register: test_queries + +# - name: copy test queries from frontend to resources +# template: +# src: "{{workspace}}/{{ project }}-frontend/yaml/{{ test_queries_file }}" +# dest: "{{workspace}}/src/main/resources/yaml/{{ test_queries_file }}" +# when: test_queries.stat.exists + +# - name: build springboot code +# command: mvn package -DskipTests +# args: +# chdir: "{{workspace}}" + +# - name: copy Bento-0.0.1.war to api.war +# copy: +# remote_src: yes +# src: "{{workspace}}/target/Bento-0.0.1.war" +# dest: "{{workspace}}/target/ROOT.war" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/{{ backend | lower}} image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/ccdc-backend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{ backend | lower}} + tag: "{{backend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{ backend | lower}} image + docker_image: + name: "cbiitssrepo/{{ backend | lower}}:{{backend_version}}-{{build_number}}" + repository: cbiitssrepo/{{ backend | lower}}:latest + force_tag: yes + push: yes + source: local + + + +############################################################################################################################ + +# Database 
Build + +############################################################################################################################ + + + +- name: run npm install in {{workspace}}/{{etl}} + command: "{{item}}" + args: + chdir: "{{workspace}}/{{etl}}" + loop: + - npm install + +- name: create digest_file_folder + file: + path: "{{workspace}}/{{etl}}/{{ digest_file_folder }}" + state: directory + +- name: Get list of files from S3 + aws_s3: + mode: list + bucket: "{{ aws_storage_bucket_name }}" + prefix: "{{ digest_file_folder }}/" + marker: "{{ digest_file_folder }}/" + register: s3_bucket_items + +- name: Print s3_bucket_items + debug: + msg: "s3_bucket_items Version: {{ s3_bucket_items.s3_keys }}" + +- name: Download files from S3 + aws_s3: + mode: get + bucket: "{{ aws_storage_bucket_name }}" + object: "{{ item }}" + dest: "{{workspace}}/{{etl}}/{{ digest_file_folder }}/{{ item|basename }}" + with_items: "{{ s3_bucket_items.s3_keys }}" + +- name: build cbiitssrepo/{{etl | lower}} image + docker_image: + build: + path: "{{workspace}}/{{etl}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/ccdc-database-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{etl | lower}} + tag: "{{etl_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{ etl | lower}} image + docker_image: + name: "cbiitssrepo/{{ etl | lower}}:{{etl_version}}-{{build_number}}" + repository: cbiitssrepo/{{ etl | lower}}:latest + force_tag: yes + push: yes + source: local \ No newline at end of file diff --git a/ansible/roles/ccdc/tasks/deploy.yml b/ansible/roles/ccdc/tasks/deploy.yml new file mode 100644 index 000000000..793d964bc --- /dev/null +++ b/ansible/roles/ccdc/tasks/deploy.yml @@ -0,0 +1,455 @@ +--- +- name: create task definition - {{backend}} + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/{{backend | lower}}:{{backend_version}}" + environment: + - name: 
REACT_APP_BACKEND_API + value: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/service/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/service/{% endif %}" + - name: REACT_APP_ENVIRONMENT + value: "{{tier}}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-backend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: true + - name: RDB_HOST + value: "{{ rds_host }}" + - name: RDB_PORT + value: "{{ rds_port }}" + - name: RDB_USER + value: "{{ rds_user }}" + - name: RDB_PASSWORD + value: "{{ rds_password }}" + - name: ES_HOST + value: "{{ es_host }}" + - name: ES_PORT + value: "{{ es_port }}" + portMappings: + - containerPort: "8080" + hostPort: "8080" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-backend" + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + + +- name: create task definition - {{project}} + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "cbiitssrepo/{{project | lower}}:{{frontend_version}}" + environment: + - name: REACT_APP_BACKEND_API + value: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/service/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/service/{% endif %}" + - name: REACT_APP_ENVIRONMENT + value: "{{tier}}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-frontend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: true + portMappings: + - containerPort: "80" + hostPort: "80" + # logConfiguration: + # logDriver: syslog + # options: + # syslog-address: tcp://{{ syslog_host }}:514 + # tag: "{{ app_name }}-frontend" + # 
syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-frontend" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - {{etl}} + ecs_taskdefinition: + containers: + - name: etl + essential: true + image: "cbiitssrepo/{{etl | lower}}:{{etl_version}}" + environment: + - name: REACT_APP_BACKEND_API + value: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/service/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/service/{% endif %}" + - name: REACT_APP_ENVIRONMENT + value: "{{tier}}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-etl" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: true + - name: RDB_HOST + value: "{{ rds_host }}" + - name: RDB_PORT + value: "{{ rds_port }}" + - name: RDB_USER + value: "{{ rds_user }}" + - name: RDB_PASSWORD + value: "{{ rds_password }}" + - name: ES_HOST + value: "{{ es_host }}" + - name: ES_PORT + value: "{{ es_port }}" + - name: DIGEST_FILE_FOLDER + value: "{{ digest_file_folder }}" + portMappings: + - containerPort: "8081" + hostPort: "8081" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-etl" + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + +- name: Print task etl + debug: + msg: "task_output Version: {{ task_output }}" + +- name: create task definition - sumo syslog + ecs_taskdefinition: + containers: + - name: sumologic-syslog + essential: true + image: "sumologic/collector:latest-syslog" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-syslog" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: 
SUMO_CLOBBER + value: "true" + portMappings: + - containerPort: "514" + hostPort: "514" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-sumo_syslog" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - sumo docker + ecs_taskdefinition: + containers: + - name: sumologic-docker + essential: true + image: "sumologic/collector:latest" + environment: + - name: SUMO_COLLECTOR_NAME + value: "{{ app_name }}-docker" + - name: SUMO_ACCESS_ID + value: "{{ sumo_access_id }}" + - name: SUMO_ACCESS_KEY + value: "{{ sumo_access_key }}" + - name: SUMO_COLLECTOR_NAME_PREFIX + value: "" + - name: SUMO_CLOBBER + value: "true" + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + network_mode: bridge + family: "{{stack_name }}-{{tier}}-sumo_docker" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + +- name: create task definition - newrelic docker + ecs_taskdefinition: + containers: + - name: newrelic-docker + essential: true + image: "newrelic/infrastructure-bundle:latest" + environment: + - name: NRIA_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NRIA_DISPLAY_NAME + value: "{{ app_name }}-docker" + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: STATUS_URL + value: "{% if tier == 'prod' %}https://bento-tools.org/nginx_status{% else %}https://{{ tier }}.bento-tools.org/nginx_status{% endif %}" + entryPoint: + - "/bin/bash" + - "-c" + - 'echo -e "integrations:\n - name: nri-nginx\n env:\n REMOTE_MONITORING: true\n METRICS: 1" > /etc/newrelic-infra/integrations.d/nginx-config.yml && /sbin/tini -- /usr/bin/newrelic-infra-service' + mountPoints: + - containerPath: /var/run/docker.sock + sourceVolume: docker-sock + readOnly: true + - containerPath: /host + sourceVolume: docker-host + readOnly: true + 
volumes: + - name: docker-sock + host: + sourcePath: /var/run/docker.sock + - name: docker-host + host: + sourcePath: / + network_mode: bridge + family: "{{stack_name }}-{{tier}}-nr_docker" + state: present + memory: '128' + cpu: '128' + region: "{{region}}" + register: task_output + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - {{project | lower}} + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-frontend" + region: "{{region}}" + register: task_frontend + +- name: query task definition - {{backend | lower}} + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-backend" + region: "{{region}}" + register: task_backend + +- name: query task definition - {{etl | lower}} + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-etl" + region: "{{region}}" + register: task_etl + +- name: Print query task etl + debug: + msg: "task_etl Version: {{ task_etl }}" + +- name: query task definition - sumologic syslog + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-sumo_syslog" + region: "{{region}}" + register: task_sumo_syslog + +- name: query task definition - sumologic docker + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-sumo_docker" + region: "{{region}}" + register: task_sumo_docker + +- name: query task definition - newrelic docker + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-nr_docker" + region: "{{region}}" + register: task_nr_docker + +############################################################################################################################ + +# Service Queries + 
+############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-backend" + details: true + region: "{{region}}" + register: service_backend + +- name: query ecs service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-frontend" + details: true + region: "{{region}}" + register: service_frontend + +- name: query etl service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-etl" + details: true + region: "{{region}}" + register: service_etl + +- name: Print service_etl + debug: + msg: "service_etl Version: {{ service_etl }}" + +- name: query sumologic syslog service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-sumo_syslog" + details: true + region: "{{region}}" + register: service_sumo_syslog + +- name: query sumologic docker service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-sumo_docker" + details: true + region: "{{region}}" + register: service_sumo_docker + +- name: query newrelic docker service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-nr_docker" + details: true + region: "{{region}}" + register: service_nr_docker + +############################################################################################################################ + +- name: set facts + set_fact: + frontend_url: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/{% endif %}" + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + etl_revision: "{{task_etl.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: 
"{{task_frontend.family}}" + task_etl_name: "{{task_etl.family}}" + task_sumo_syslog_name: "{{task_sumo_syslog.family}}" + task_sumo_docker_name: "{{task_sumo_docker.family}}" + task_nr_docker_name: "{{task_nr_docker.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + lb_etl: "{{service_etl.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update sumologic syslog service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-sumo_syslog" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_sumo_syslog_name}}" + force_new_deployment: no + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_syslog_output + +- name: update sumologic docker service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-sumo_docker" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_sumo_docker_name}}" + force_new_deployment: no + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_sumo_docker_output + +- name: update newrelic docker service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_nr_docker_name}}" + force_new_deployment: no + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_nr_docker_output + +- name: update backend service + ecs_service: + state: present + name: "{{stack_name 
}}-{{tier}}-backend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output + +- name: update frontend service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-frontend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +- name: update etl service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-etl" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_etl_name}}:{{etl_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_etl }}" + region: "{{region}}" + register: service_etl_output diff --git a/ansible/roles/ccdc/tasks/main.yml b/ansible/roles/ccdc/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/ccdc/tasks/redis.yml b/ansible/roles/ccdc/tasks/redis.yml new file mode 100644 index 000000000..46787e625 --- /dev/null +++ b/ansible/roles/ccdc/tasks/redis.yml @@ -0,0 +1,5 @@ +- name: confirm redis redis_host + debug: + msg: "{{redis_host[tier]}}" +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL ASYNC" | redis-cli -h {{ redis_host[tier]}} -p 6379 -c diff --git a/ansible/roles/ccdc/templates/env.j2 b/ansible/roles/ccdc/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/ccdc/templates/env.j2 @@ -0,0 +1,9 @@ +{% 
if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/ccdc/templates/nginx-config.yml.j2 b/ansible/roles/ccdc/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/ccdc/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/ccdc/tests/inventory b/ansible/roles/ccdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/ccdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/ccdc/tests/test.yml b/ansible/roles/ccdc/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/ccdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/ccdc/vars/main.yml b/ansible/roles/ccdc/vars/main.yml new file mode 100644 index 000000000..d51b6e6ea --- /dev/null +++ b/ansible/roles/ccdc/vars/main.yml @@ -0,0 +1,31 @@ +--- +# vars file for cicd +stack_name: ccdc +platform: aws +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', 
region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: "{{stack_name}}-{{platform}}-{{tier}}" +project: CCDC-WebPortal +backend: CCDC-WebService +etl: CCDC-ETL +frontend_version: "{{ lookup('env','FE_VERSION') }}" +backend_version: "{{ lookup('env','BE_VERSION') }}" +etl_version: "{{ lookup('env','ETL_VERSION') }}" +region: us-east-1 +rds_host: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-rds-host', region='us-east-1' ) }}" +rds_port: "{{ lookup('aws_ssm', 'rds_port', region='us-east-1' ) }}" +rds_user: "{{ lookup('aws_ssm', 'rds_user', region='us-east-1' ) }}" +rds_password: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-rds-password', region='us-east-1' ) }}" +es_host: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-es-host', region='us-east-1' ) }}" +es_port: "{{ lookup('aws_ssm', 'es_port', region='us-east-1' ) }}" +digest_file_folder: "{{ lookup('env','S3_FOLDER') }}" +aws_storage_bucket_name : ccdc-etl-digest + + diff --git a/ansible/roles/cicd/.travis.yml b/ansible/roles/cicd/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/cicd/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/cicd/README.md 
b/ansible/roles/cicd/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/cicd/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/cicd/defaults/main.yml b/ansible/roles/cicd/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/cicd/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/cicd/handlers/main.yml b/ansible/roles/cicd/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/cicd/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/cicd/meta/main.yml b/ansible/roles/cicd/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/cicd/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. 
Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/cicd/tasks/build.yml b/ansible/roles/cicd/tasks/build.yml new file mode 100644 index 000000000..618e892d2 --- /dev/null +++ b/ansible/roles/cicd/tasks/build.yml @@ -0,0 +1,81 @@ +--- +# tasks file for cicd +# - name: copy application_example.properties to application.properties +# copy: +# remote_src: yes +# src: "{{workspace}}/src/main/resources/application_example.properties" +# dest: "{{workspace}}/src/main/resources/application.properties" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" +- name: copy ICDC-0.0.1.war to ROOT.war + copy: + remote_src: yes + src: "{{workspace}}/target/ICDC-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +- name: remove ICDC-0.0.1.war file + file: + path: "{{workspace}}/target/ICDC-0.0.1.war" + state: absent + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/dockerfiles/backend-dockerfile" + pull: yes + name: cbiitssrepo/backend + tag: "{{build_number}}" + push: yes + source: build + +- name: run npm install in {{workspace}}/bento-frontend/ + command: "{{item}}" + args: + chdir: 
"{{workspace}}/bento-frontend" + loop: + - npm install + - npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + +- name: run npm install and build in {{workspace}}/bento-frontend/node_modules/mui-custom-datatables + command: "{{item}}" + args: + chdir: "{{workspace}}/bento-frontend/node_modules/mui-custom-datatables" + loop: + - npm install + - npm run build + +- name: run npm build in frontend + command: npm run-script build + args: + chdir: "{{workspace}}/bento-frontend" + +# - name: build cbiitssrepo/app image +# docker_image: +# build: +# path: "{{workspace}}/src/main/frontend" +# dockerfile: "{{workspace}}/dockerfiles/frontend-dockerfile" +# pull: yes +# name: cbiitssrepo/app +# tag: "{{build_number}}" +# push: yes +# source: build + diff --git a/ansible/roles/cicd/tasks/deploy-backend.yml b/ansible/roles/cicd/tasks/deploy-backend.yml new file mode 100644 index 000000000..8489aa176 --- /dev/null +++ b/ansible/roles/cicd/tasks/deploy-backend.yml @@ -0,0 +1,62 @@ +--- +- name: create task definition + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/backend:{{build_number}}" + memory: 512 + portMappings: + - containerPort: "8080" + hostPort: "80" + network_mode: bridge + family: ctdc + state: present + region: "{{region}}" + register: task_output + +- name: query task definition + ecs_taskdefinition_info: + task_definition: ctdc + region: "{{region}}" + register: task_output + +- name: query ecs service + ecs_service_info: + cluster: ctdc-ecs + service: ctdc_ecs_service + details: true + region: "{{region}}" + register: service_info + +- name: set facts + set_fact: + revision: "{{task_output.revision}}" + task_name: "{{task_output.family}}" + lb_target_arn: "{{service_info.services[0].loadBalancers[0].targetGroupArn}}" + lb_container_port: "{{service_info.services[0].loadBalancers[0].containerPort}}" + lb_container_name: "{{service_info.services[0].loadBalancers[0].containerName}}" + 
role_arn: "{{service_info.services[0].roleArn}}" + +- name: update ecs service + ecs_service: + state: present + name: ctdc_ecs_service + cluster: ctdc-ecs + task_definition: "{{task_name}}:{{revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: + - targetGroupArn: "{{lb_target_arn}}" + containerName: "{{lb_container_name}}" + containerPort: "{{ lb_container_port }}" + region: "{{region}}" + register: service_output + +- name: show service service + debug: + msg: "{{service_output}}" \ No newline at end of file diff --git a/ansible/roles/cicd/tasks/deploy-frontend.yml b/ansible/roles/cicd/tasks/deploy-frontend.yml new file mode 100644 index 000000000..8587ee766 --- /dev/null +++ b/ansible/roles/cicd/tasks/deploy-frontend.yml @@ -0,0 +1,10 @@ +- name: put dist content to s3 + s3_sync: + bucket: "{{s3_bucket}}" + file_root: "{{workspace}}/bento-frontend/dist" + file_change_strategy: force + permission: public-read + cache_control: "public, max-age=300" + include: "*" + region: "{{region}}" + \ No newline at end of file diff --git a/ansible/roles/cicd/tasks/main.yml b/ansible/roles/cicd/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/cicd/templates/application.properties.j2 b/ansible/roles/cicd/templates/application.properties.j2 new file mode 100644 index 000000000..969247c94 --- /dev/null +++ b/ansible/roles/cicd/templates/application.properties.j2 @@ -0,0 +1,27 @@ +spring.mvc.throw-exception-if-no-handler-found=true +spring.data.neo4j.username={{neo4j_user}} +spring.data.neo4j.password={{neo4j_ip}} +neo4j.jdbc.server=jdbc:neo4j:bolt://{{neo4j_ip}} +graphql.schema=graphql/person.graphqls, graphql/icdc.graphqls +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.java.driver.server=bolt://{{neo4j_ip}}:7687 +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ 
+spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +api.version=v1 +session.timeout=30 +data.model.version = 1 +allow_grapqh_query = true +allow_graphql_mutation =false +fence.client_id={{fence_id}} +fence.client_credential={{fence_credential}} +fence.redirect_url=https://{{fence_url}}/ +fence.url=https://nci-crdc-staging.datacommons.io/ +fence.exchange_token_url=https://nci-crdc-staging.datacommons.io/user/oauth2/token +fence.log_out_url = https://nci-crdc-staging.datacommons.io/user/logout + +neo4j_query.getversion= query { numberOfStudies }; +graphql_api.version = 1.0.0 +rest_api.version=1.0.0 +front_end.version =1.0.0 \ No newline at end of file diff --git a/ansible/roles/cicd/tests/inventory b/ansible/roles/cicd/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/cicd/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/cicd/tests/test.yml b/ansible/roles/cicd/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/cicd/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/cicd/vars/main.yml b/ansible/roles/cicd/vars/main.yml new file mode 100644 index 000000000..29b1edcd2 --- /dev/null +++ b/ansible/roles/cicd/vars/main.yml @@ -0,0 +1,15 @@ +--- +# vars file for cicd +workspace: "{{ lookup('env','WORKSPACE') }}" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +bearer: "{{ lookup('env','BEARER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASSWORD') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +fence_id: "{{ lookup('env','FENCE_ID') }}" +fence_credential: "{{ lookup('env','FENCE_CREDENTIAL') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +s3_bucket: "bento.essential-dev.com" +fence_url: "{{ 
lookup('env','FENCE_URL') }}" diff --git a/ansible/roles/cloudone-ecs/.travis.yml b/ansible/roles/cloudone-ecs/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/cloudone-ecs/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/README.md b/ansible/roles/cloudone-ecs/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/cloudone-ecs/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/cloudone-ecs/defaults/main.yml b/ansible/roles/cloudone-ecs/defaults/main.yml new file mode 100644 index 000000000..e020f74f8 --- /dev/null +++ b/ansible/roles/cloudone-ecs/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +platform: aws diff --git a/ansible/roles/cloudone-ecs/handlers/main.yml b/ansible/roles/cloudone-ecs/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/cloudone-ecs/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/meta/main.yml b/ansible/roles/cloudone-ecs/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/cloudone-ecs/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/tasks/build.yml b/ansible/roles/cloudone-ecs/tasks/build.yml new file mode 100644 index 000000000..9cc3a79e3 --- /dev/null +++ b/ansible/roles/cloudone-ecs/tasks/build.yml @@ -0,0 +1,100 @@ +--- +- name: set demo environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_ECS_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "ecs-dev" + +- name: copy environment file to {{workspace}}/src/main/frontend + template: + src: env.j2 + dest: "{{workspace}}/src/main/frontend/.env" + +- name: run npm install in {{workspace}}/src/main/frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/" + loop: + - npm install + - npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + + +# - name: run npm install in {{workspace}}/icdc-frontend/ +# command: "{{item}}" +# args: +# chdir: "{{workspace}}/src/main/frontend/" +# loop: +# - npm install npm@latest -g +# - npm install +# - npm run build + + +- name: run npm install and build in 
{{workspace}}/src/main/frontend/node_modules/mui-custom-datatables + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/node_modules/mui-custom-datatables" + loop: + - npm install + - npm run build + +- name: run npm build in frontend + command: npm run build + args: + chdir: "{{workspace}}/src/main/frontend" + +- name: copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy ICDC-0.0.1.war to ROOT.war + copy: + remote_src: yes + src: "{{workspace}}/target/ICDC-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +# - name: login into ecr +# shell: "$(/usr/local/bin/aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {{ecr}})" +# ignore_errors: True + +- name: build {{ecr}}:icdc image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/backend-dockerfile" + pull: yes + nocache: yes + # name: "{{ecr}}:icdc" + name: cbiitssrepo/icdc + tag: "{{build_number}}" + # tag: ecs + push: yes + source: build + + +# - name: post schemas +# uri: +# url: http://{{neo4j_ip}}:7474/graphql/idl/ +# method: POST +# body: "{{ lookup('file','{{workspace}}/src/main/resources/graphql/icdc.graphql') }}" +# headers: +# Accept: "application/json" +# Authorization: "{{bearer}}" +# register: schema + +# - name: schema output +# debug: +# msg: "{{schema}}" + + \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/tasks/deploy.yml b/ansible/roles/cloudone-ecs/tasks/deploy.yml new file mode 100644 index 000000000..39460f7a5 --- /dev/null +++ b/ansible/roles/cloudone-ecs/tasks/deploy.yml @@ -0,0 +1,75 @@ +--- + +- name: create task 
definition + ecs_taskdefinition: + containers: + - name: icdc + essential: true + # image: "{{ecr}}:icdc" + image: cbiitssrepo/icdc:{{build_number}} + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + portMappings: + - containerPort: "8080" + hostPort: "8080" + network_mode: bridge + family: icdc-{{tier}}-task-definition + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + + +- name: query task definition + ecs_taskdefinition_info: + task_definition: icdc-{{tier}}-task-definition + region: "{{region}}" + register: task_icdc + + +- name: query demo service + ecs_service_info: + cluster: nci-cbiit-icdc-dev + service: icdc-{{tier}}-service + details: true + region: "{{region}}" + register: service_icdc + + +- name: set facts + set_fact: + icdc_revision: "{{task_icdc.revision}}" + task_icdc_name: "{{task_icdc.family}}" + + # lb_icdc: "{{service_icdc.services[0].loadBalancers}}" + # role_arn: "{{service_icdc.services[0].roleArn}}" + + +- name: update icdc service + ecs_service: + state: present + name: icdc-{{tier}}-service + cluster: nci-cbiit-icdc-dev + task_definition: "{{task_icdc_name}}:{{icdc_revision}}" + role: "{{service_role_arn}}" + force_new_deployment: yes + + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + placement_constraints: + - type: memberOf + expression: 'attribute:project==icdc' + desired_count: 1 + load_balancers: + - { "containerName": "icdc", "containerPort": "8080", "targetGroupArn" : "{{alb_target_arn}}" } + region: "{{region}}" + diff --git a/ansible/roles/cloudone-ecs/tasks/main.yml b/ansible/roles/cloudone-ecs/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/cloudone-ecs/tasks/maintenance.yml 
b/ansible/roles/cloudone-ecs/tasks/maintenance.yml new file mode 100644 index 000000000..4537a4738 --- /dev/null +++ b/ansible/roles/cloudone-ecs/tasks/maintenance.yml @@ -0,0 +1,130 @@ +--- + + +- name: create task definition + ecs_taskdefinition: + containers: + - name: icdc-cloudone + essential: true + # image: "{{ecr}}:icdc" + image: cbiitssrepo/icdc:ecs + environment: + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: true + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + portMappings: + - containerPort: "8080" + hostPort: "8080" + # logConfiguration: + # logDriver: sumologic + # options: + # sumo-url: "{{sumo_collector_url}}" + # sumo-source-category: "{{tier}}/{{platform}}/bento/api/logs" + # sumo-source-name: "bento-{{platform}}-api-docker-{{tier}}" + network_mode: bridge + family: icdc-{{tier}}-task-definition + memory: '1024' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + + +- name: query task definition + ecs_taskdefinition_info: + task_definition: icdc-{{tier}}-task-definition + region: "{{region}}" + register: task_icdc + + + +- name: query demo service + ecs_service_info: + cluster: default + service: icdc-{{tier}}-service + details: true + region: "{{region}}" + register: service_icdc + + + +- name: set facts + set_fact: + icdc_revision: "{{task_icdc.revision}}" + task_icdc_name: "{{task_icdc.family}}" + # lb_icdc: "{{service_icdc.services[0].loadBalancers}}" + # role_arn: "{{service_icdc.services[0].roleArn}}" + + +# - name: create ecs service +# ecs_service: +# state: present +# name: icdc-{{tier}}-service +# cluster: default +# task_definition: "{{task_icdc_name}}:{{icdc_revision}}" +# desired_count: 1 +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# # placement_constraints: +# # - type: memberOf +# # expression: 
'attribute:flavor==test' +# # placement_strategy: +# # - type: binpack +# # field: memory +# region: "{{region}}" + +# - name: update icdc service +# ecs_service: +# state: present +# name: icdc-{{tier}}-service +# cluster: default +# task_definition: "{{task_icdc_name}}:{{icdc_revision}}" +# # role: "{{role_arn}}" +# force_new_deployment: yes + +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# placement_constraints: +# - type: memberOf +# expression: 'attribute:project==icdc' +# desired_count: 1 +# # load_balancers: "{{ lb_icdc }}" +# region: "{{region}}" + + + + +- name: update icdc service + ecs_service: + state: present + name: icdc-{{tier}}-service + cluster: default + task_definition: "{{task_icdc_name}}:{{icdc_revision}}" + # force_new_deployment: yes + + desired_count: 0 + # load_balancers: "{{ lb_icdc }}" + region: "{{region}}" + + + + + + +- name: update icdc service + ecs_service: + state: absent + name: icdc-{{tier}}-service + cluster: default + # task_definition: "{{task_icdc_name}}:{{icdc_revision}}" + + # desired_count: 1 + # load_balancers: "{{ lb_icdc }}" + region: "{{region}}" \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/templates/application.properties.j2 b/ansible/roles/cloudone-ecs/templates/application.properties.j2 new file mode 100644 index 000000000..d75fa1487 --- /dev/null +++ b/ansible/roles/cloudone-ecs/templates/application.properties.j2 @@ -0,0 +1,26 @@ +spring.mvc.throw-exception-if-no-handler-found=true +spring.data.neo4j.username={{neo4j_user}} +spring.data.neo4j.password={{neo4j_password}} +neo4j.jdbc.server=jdbc:neo4j:bolt://{{neo4j_ip}} +graphql.schema=graphql/person.graphqls, graphql/icdc.graphqls +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.java.driver.server=bolt://{{neo4j_ip}}:7687 +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +api.version=v1 
+session.timeout=30 +data.model.version = 1 +allow_grapqh_query = true +allow_graphql_mutation =false +fence.client_id={{fence_id}} +fence.client_credential={{fence_credential}} +fence.redirect_url=https://{{fence_url}}/ +fence.url=https://nci-crdc-staging.datacommons.io/ +fence.exchange_token_url=https://nci-crdc-staging.datacommons.io/user/oauth2/token +fence.log_out_url = https://nci-crdc-staging.datacommons.io/user/logout +neo4j_query.getversion= query { numberOfStudies }; +graphql_api.version = 1.0.0 +rest_api.version =1.0.0 +front_end.version =1.0.0 \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/templates/env.j2 b/ansible/roles/cloudone-ecs/templates/env.j2 new file mode 100644 index 000000000..937253538 --- /dev/null +++ b/ansible/roles/cloudone-ecs/templates/env.j2 @@ -0,0 +1,6 @@ +REACT_APP_BACKEND_GETUSERINFO_API=https://{{ecs_url}}/fence/login/ +REACT_APP_LOGIN_URL=https://nci-crdc-staging.datacommons.io/user/oauth2/authorize?client_id=82pslYFJqA7auRvKYfTOK67jzQAMb8f6C33tlmZz&response_type=code&redirect_uri=https%3A%2F%2Fcaninecommons-demo.cancer.gov%2F&scope=openid%20user +REACT_APP_USER_LOGOUT_URL=https://{{ecs_url}}/fence/logout +REACT_APP_BACKEND_API=https://{{ecs_url}}/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/icdc-codebase/master/src/main/frontend/src/content/dev/aboutPagesContent.yaml \ No newline at end of file diff --git a/ansible/roles/cloudone-ecs/tests/inventory b/ansible/roles/cloudone-ecs/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/cloudone-ecs/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/cloudone-ecs/tests/test.yml b/ansible/roles/cloudone-ecs/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/cloudone-ecs/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at 
end of file diff --git a/ansible/roles/cloudone-ecs/vars/main.yml b/ansible/roles/cloudone-ecs/vars/main.yml new file mode 100644 index 000000000..846f3b44c --- /dev/null +++ b/ansible/roles/cloudone-ecs/vars/main.yml @@ -0,0 +1,22 @@ +--- +# vars file for cicd +tier: "{{ lookup('env','TIER') }}" +ecr: "{{ lookup('env','ECR') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_collector_url: "{{ lookup('aws_ssm', 'sumo_collector_url', region='us-east-1' ) }}" +app_name: bento-{{platform}}-{{tier}} +neo4j_password: "{{ lookup('env','NEO4J_PASSWORD') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +fence_id: "{{ lookup('env','FENCE_ID') }}" +fence_credential: "{{ lookup('env','FENCE_CREDENTIAL') }}" +fence_url: "{{ lookup('env','FENCE_URL') }}" +ecs_url: "{{lookup('env','ECS_DEV_URL')}}" +alb_target_arn: "{{lookup('env','ALB_TARGET_ARN')}}" +service_role_arn: "{{lookup('env','SERVICE_ROLE_ARN')}}" +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/common/README.md b/ansible/roles/common/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/common/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 000000000..fa3055099 --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for common \ No newline at end of file diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml new file mode 100644 index 000000000..c6a8f0c7b --- /dev/null +++ b/ansible/roles/common/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for common \ No newline at end of file diff --git a/ansible/roles/k9dc/meta/main.yml b/ansible/roles/common/meta/main.yml similarity index 100% rename from ansible/roles/k9dc/meta/main.yml rename to ansible/roles/common/meta/main.yml diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 000000000..a0230f2b4 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,11 @@ +--- +# tasks file for common +- name: Set timezone to America/New_York + timezone: + name: America/New_York + +# - name: set hostname +# hostname: +# name: "{{ hostvars[inventory_hostname].group_names[0] }}-{{ env }}" +# tags: +# - sandbox \ No newline at end of file diff --git a/ansible/roles/common/tests/inventory b/ansible/roles/common/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/common/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/common/tests/test.yml b/ansible/roles/common/tests/test.yml new file mode 100644 index 000000000..8d24282da --- /dev/null +++ b/ansible/roles/common/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - common \ No newline at end of file diff --git a/ansible/roles/common/vars/main.yml b/ansible/roles/common/vars/main.yml new file mode 100644 index 000000000..feaa92f9b --- /dev/null +++ b/ansible/roles/common/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for common \ No newline at 
end of file diff --git a/ansible/roles/community-neo4j/README.md b/ansible/roles/community-neo4j/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/community-neo4j/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/community-neo4j/defaults/main.yml b/ansible/roles/community-neo4j/defaults/main.yml new file mode 100644 index 000000000..c68748e1f --- /dev/null +++ b/ansible/roles/community-neo4j/defaults/main.yml @@ -0,0 +1,10 @@ +--- +# defaults file for neo4j +newrelic: no +#neo4j_home: /var/lib/neo4j +#neo4j_version: 3.5.21 +#graphql_version: 3.5.15.5 +neo4j_version: "4.4.19" +#neo4j_version: "4.2.19" +apoc_version: "4.4.0.18-core" +#apoc_version: "4.2.0.11-core" diff --git a/ansible/roles/community-neo4j/handlers/main.yml b/ansible/roles/community-neo4j/handlers/main.yml new file mode 100644 index 000000000..0168d6d56 --- /dev/null +++ b/ansible/roles/community-neo4j/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for neo4j +- name: restart_neo4j + service: + name: neo4j + state: restarted + +- name: restart_firewalld + service: + name: firewalld + state: started + enabled: yes diff --git a/ansible/roles/community-neo4j/meta/main.yml b/ansible/roles/community-neo4j/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/community-neo4j/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. 
If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/community-neo4j/tasks/main.yml b/ansible/roles/community-neo4j/tasks/main.yml new file mode 100644 index 000000000..b071222d2 --- /dev/null +++ b/ansible/roles/community-neo4j/tasks/main.yml @@ -0,0 +1,129 @@ +--- +# tasks file for neo4j +- name: install systems packages + yum: + name: + - firewalld + # - epel-release + - unzip + - wget + state: latest + disable_gpg_check: yes + +- name: create neo4j group + group: + name: neo4j + state: present + +- name: create neo4j user + user: + name: neo4j + groups: neo4j + append: yes + +- name: add neo4j repository + yum_repository: + name: neo4j + description: neo4j repository + file: neo4j + baseurl: https://yum.neo4j.com/stable + gpgcheck: no + enabled: yes + +- name: install open-jdk-11 + #command: amazon-linux-extras install java-openjdk11 + command: dnf -y install java-11-openjdk + +- name: install neo4j community {{ neo4j_version }} and java 11 JRE + yum: + name: + - neo4j-{{ neo4j_version }} + state: installed + +- name: Generate an OpenSSL private key + openssl_privatekey: + path: "/var/lib/neo4j/certificates/neo4j.key" + size: 4096 + type: RSA + backup: yes + +- name: Generate an OpenSSL Certificate Signing Request with Subject information + openssl_csr: + path: "/var/lib/neo4j/certificates/neo4j.csr" + privatekey_path: "/var/lib/neo4j/certificates/neo4j.key" + common_name: "localhost" + +- name: Generate a Self Signed OpenSSL certificate + openssl_certificate: + path: "/var/lib/neo4j/certificates/neo4j.cert" + privatekey_path: "/var/lib/neo4j/certificates/neo4j.key" + csr_path: "/var/lib/neo4j/certificates/neo4j.csr" + provider: selfsigned + +- name: change neo4j parameters + lineinfile: + path: /etc/neo4j/neo4j.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + with_items: + - { regexp: '^#dbms.allow_upgrade=true', line: 'dbms.allow_upgrade=true' } + - { regexp: '^#dbms.default_listen_address=0.0.0.0', line: 
'dbms.default_listen_address=0.0.0.0' } + - { regexp: '^#dbms.connector.bolt.tls_level=DISABLED', line: 'dbms.connector.bolt.tls_level=OPTIONAL' } + - { regexp: '^#dbms.connector.bolt.listen_address=:7687', line: 'dbms.connector.bolt.listen_address=:7687' } + - { regexp: '^#dbms.ssl.policy.bolt.enabled=true', line: 'dbms.ssl.policy.bolt.enabled=true' } + - { regexp: '^#dbms.ssl.policy.bolt.base_directory=certificates/bolt', line: 'dbms.ssl.policy.bolt.base_directory=certificates' } + - { regexp: '^#dbms.ssl.policy.bolt.private_key=private.key', line: 'dbms.ssl.policy.bolt.private_key=neo4j.key' } + - { regexp: '^#dbms.ssl.policy.bolt.public_certificate=public.crt', line: 'dbms.ssl.policy.bolt.public_certificate=neo4j.cert' } + - { regexp: '^#dbms.ssl.policy.bolt.client_auth=NONE', line: 'dbms.ssl.policy.bolt.client_auth=NONE' } + +- name: add APOC + copy: + remote_src: yes + src: "/var/lib/neo4j/labs/apoc-{{ apoc_version }}.jar" + dest: "/var/lib/neo4j/plugins/apoc-{{ apoc_version }}.jar" + owner: neo4j + group: neo4j + +- name: update ownership of /var/lib/neo4j + file: + path: /var/lib/neo4j + owner: neo4j + group: neo4j + recurse: yes + +- name: start and enable neo4j service + service: + name: neo4j + state: started + enabled: yes + +- name: add DefaultLimitNOFILE=60000 to /etc/systemd/user.conf + lineinfile: + path: /etc/systemd/user.conf + line: 'DefaultLimitNOFILE=60000' + regex: '#DefaultLimitNOFILE=' + state: present + +- name: reload system daemon + systemd: + daemon_reload: yes + +- name: start and enable firewalld service + service: + name: firewalld + state: started + enabled: yes + +- name: open neo4j data ports + firewalld: + port: "{{item}}/tcp" + zone: public + immediate: yes + permanent: yes + state: enabled + loop: + - 7474 + - 7473 + - 7687 + notify: restart_firewalld diff --git a/ansible/roles/community-neo4j/tasks/main.yml.bak b/ansible/roles/community-neo4j/tasks/main.yml.bak new file mode 100644 index 000000000..229b8d429 --- /dev/null +++ 
b/ansible/roles/community-neo4j/tasks/main.yml.bak @@ -0,0 +1,103 @@ +--- +# tasks file for neo4j +- name: install systems packages + yum: + name: + - firewalld + - epel-release + - unzip + - wget + - java-11-openjdk-devel + state: latest + disable_gpg_check: yes + +- name: create neo4j group + group: + name: neo4j + state: present + +- name: create tomcat user + user: + name: neo4j + groups: neo4j + append: yes + +- name: download neo4j tar ball + get_url: + url: https://dist.neo4j.org/neo4j-community-{{neo4j_version}}-unix.tar.gz + dest: /tmp/neo4j-community.tar.gz + +- name: extract neo4j + unarchive: + src: /tmp/neo4j-community.tar.gz + dest: "{{neo4j_home}}" + remote_src: yes + +- name: change ownership of /var/lib/neo4j + file: + path: "{{neo4j_home}}" + owner: neo4j + group: neo4j + recurse: yes + +- name: change permission on /var/lib/neo4j/data + file: + path: "{{neo4j_home}}/{{item}}" + owner: neo4j + group: neo4j + recurse: yes + mode: '777' + loop: + - data + - logs + +- name: copy neo4j.conf and service "{{neo4j_home}}" + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + owner: neo4j + group: neo4j + loop: + - { src: 'neo4j.conf.j2', dest: "{{neo4j_home}}/conf/neo4j.conf" } + - { src: 'neo4j.service.j2', dest: "/usr/lib/systemd/system/neo4j.service" } + notify: restart_neo4j + + +- name: add DefaultLimitNOFILE=60000 to /etc/systemd/user.conf + lineinfile: + path: /etc/systemd/user.conf + line: 'DefaultLimitNOFILE=60000' + regex: '#DefaultLimitNOFILE=' + state: present + +- name: reload system daemon + systemd: + daemon_reload: yes + +- name: start and enable neo4j service + service: + name: neo4j + state: started + enabled: yes + +- name: open neo4j data ports + firewalld: + port: "{{item}}/tcp" + zone: public + immediate: yes + permanent: yes + state: enabled + loop: + - 7474 + - 7473 + - 7687 + notify: restart_firewalld + +- name: download neo4j-graphql plugins + get_url: + url: 
https://github.com/neo4j-graphql/neo4j-graphql/releases/download/{{graphql_version}}/neo4j-graphql-{{graphql_version}}.jar + dest: "{{neo4j_home}}/plugins" + owner: neo4j + group: neo4j + + diff --git a/ansible/roles/community-neo4j/tasks/newrelic.yml b/ansible/roles/community-neo4j/tasks/newrelic.yml new file mode 100644 index 000000000..bc6701afd --- /dev/null +++ b/ansible/roles/community-neo4j/tasks/newrelic.yml @@ -0,0 +1,87 @@ +# - name: download nodejs rpm script +# get_url: +# url: https://rpm.nodesource.com/setup_{{node_version}}.x +# dest: /tmp/nodejs.sh +# mode: 0755 + +# - name: run the nodejs.sh script +# shell: /tmp/nodejs.sh + +# - name: cleanup nodejs.sh file +# file: +# path: /tmp/nodejs.sh +# state: absent + +# - name: install nodes and npm +# yum: +# name: nodejs +# state: installed + +# - name: install newrelic neo4j plugin +# npm: +# name: newrelic-neo4j +# global: yes +# unsafe_perm: yes + +# - name: configure newrelic-neo4j +# template: +# src: newrelic-neo4j.js.j2 +# dest: /etc/newrelic/newrelic-neo4j.js + +# - name: configure newrelic-neo4j service +# template: +# src: newrelic-neo4j.service.j2 +# dest: /etc/systemd/system/newrelic-neo4j.service + +# - name: reload systemd +# systemd: +# daemon_reload: yes + +# - name: enable and start newrelic-neo4j +# systemd: +# name: newrelic-neo4j +# state: started +# enabled: yes + +# - name: run newrelic-neo4j +# command: newrelic-neo4j >/dev/null 2>&1 & + +- name: download newrelic apm agent + get_url: + url: http://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip + dest: /var/lib/neo4j + +- name: unzip newrelic-java.zip + unarchive: + src: /var/lib/neo4j/newrelic-java.zip + dest: /var/lib/neo4j + remote_src: yes + owner: neo4j + group: neo4j + +- name: remove newrelic-java.zip + file: + path: /var/lib/neo4j/newrelic-java.zip + state: absent + +- name: update newrelic.yml with license info + lineinfile: + path: /var/lib/neo4j/newrelic/newrelic.yml + regexp: '^ 
license_key' + line: ' license_key: {{newrelic_license_key}}' + state: present + +- name: update newrelic.yml with app_name + lineinfile: + path: /var/lib/neo4j/newrelic/newrelic.yml + insertafter: '# The first application name must be unique.' + regexp: '^ app_name: My Application$' + state: present + line: ' app_name: {{env}}-neo4j' + +- name: change neo4j parameters + lineinfile: + path: /etc/neo4j/neo4j.conf + line: dbms.jvm.additional=-javaagent:/var/lib/neo4j/newrelic/newrelic.jar + state: present + notify: restart_neo4j \ No newline at end of file diff --git a/ansible/roles/community-neo4j/tasks/newrelic.yml.new b/ansible/roles/community-neo4j/tasks/newrelic.yml.new new file mode 100644 index 000000000..c3dac8a1a --- /dev/null +++ b/ansible/roles/community-neo4j/tasks/newrelic.yml.new @@ -0,0 +1,42 @@ +--- +- name: add newrelic-infra gpg key + rpm_key: + state: present + key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg + +- name: setup newrelic repo + yum_repository: + name: newrelic-infra + description: Newrelic infrastructure repository + baseurl: https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/ + +- name: install newrelic-infra + package: + name: + - libcap + - newrelic-infra + state: installed + environment: + NRIA_MODE: PRIVILEGED + +- name: copy newrelic config file to /etc/ + template: + src: newrelic-infra.yml.j2 + dest: /etc/newrelic-infra.yml + +- name: add nri-agent to docker group + user: + name: nri-agent + groups: docker + append: yes + +- name: enable and restart newrelic-infra service + service: + name: newrelic-infra + state: restarted + enabled: yes + + + + # https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/newrelic-infra.repo + diff --git a/ansible/roles/community-neo4j/templates/neo4j.conf.j2 b/ansible/roles/community-neo4j/templates/neo4j.conf.j2 new file mode 100644 index 000000000..2a455de88 --- /dev/null +++ 
b/ansible/roles/community-neo4j/templates/neo4j.conf.j2 @@ -0,0 +1,366 @@ +#***************************************************************** +# Neo4j configuration +# +# For more details and a complete list of settings, please see +# https://neo4j.com/docs/operations-manual/3.5/reference/configuration-settings/ +#***************************************************************** + +# The name of the database to mount +#dbms.active_database=graph.db + +# Paths of directories in the installation. +#dbms.directories.data=data +#dbms.directories.plugins=plugins +#dbms.directories.certificates=certificates +#dbms.directories.logs=logs +#dbms.directories.lib=lib +#dbms.directories.run=run + +# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to +# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the +# `LOAD CSV` section of the manual for details. +dbms.directories.import=import + +# Whether requests to Neo4j are authenticated. +# To disable authentication, uncomment this line +#dbms.security.auth_enabled=false + +# Enable this to be able to upgrade a store from an older version. +#dbms.allow_upgrade=true + +# Java Heap Size: by default the Java heap size is dynamically +# calculated based on available system resources. +# Uncomment these lines to set specific initial and maximum +# heap size. +dbms.memory.heap.initial_size=512m +dbms.memory.heap.max_size=512m + +# The amount of memory to use for mapping the store files, in bytes (or +# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g'). +# If Neo4j is running on a dedicated server, then it is generally recommended +# to leave about 2-4 gigabytes for the operating system, give the JVM enough +# heap to hold all your transaction state and query context, and then leave the +# rest for the page cache. 
+# The default page cache memory assumes the machine is dedicated to running +# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size. +#dbms.memory.pagecache.size=10g + +#***************************************************************** +# Network connector configuration +#***************************************************************** + +# With default configuration Neo4j only accepts local connections. +# To accept non-local connections, uncomment this line: +dbms.connectors.default_listen_address=0.0.0.0 + +# You can also choose a specific network interface, and configure a non-default +# port for each connector, by setting their individual listen_address. + +# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or +# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for +# individual connectors below. +#dbms.connectors.default_advertised_address=localhost + +# You can also choose a specific advertised hostname or IP address, and +# configure an advertised port for each connector, by setting their +# individual advertised_address. + +# Bolt connector +dbms.connector.bolt.enabled=true +#dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=:7687 + +# HTTP Connector. There can be zero or one HTTP connectors. +dbms.connector.http.enabled=true +#dbms.connector.http.listen_address=:7474 + +# HTTPS Connector. There can be zero or one HTTPS connectors. +dbms.connector.https.enabled=true +#dbms.connector.https.listen_address=:7473 + +# Number of Neo4j worker threads. +#dbms.threads.worker_count= + +#***************************************************************** +# SSL system configuration +#***************************************************************** + +# Names of the SSL policies to be used for the respective components. 
+ +# The legacy policy is a special policy which is not defined in +# the policy configuration section, but rather derives from +# dbms.directories.certificates and associated files +# (by default: neo4j.key and neo4j.cert). Its use will be deprecated. + +# The policies to be used for connectors. +# +# N.B: Note that a connector must be configured to support/require +# SSL/TLS for the policy to actually be utilized. +# +# see: dbms.connector.*.tls_level + +#bolt.ssl_policy=legacy +#https.ssl_policy=legacy + +#***************************************************************** +# SSL policy configuration +#***************************************************************** + +# Each policy is configured under a separate namespace, e.g. +# dbms.ssl.policy..* +# +# The example settings below are for a new policy named 'default'. + +# The base directory for cryptographic objects. Each policy will by +# default look for its associated objects (keys, certificates, ...) +# under the base directory. +# +# Every such setting can be overridden using a full path to +# the respective object, but every policy will by default look +# for cryptographic objects in its base location. +# +# Mandatory setting + +#dbms.ssl.policy.default.base_directory=certificates/default + +# Allows the generation of a fresh private key and a self-signed +# certificate if none are found in the expected locations. It is +# recommended to turn this off again after keys have been generated. +# +# Keys should in general be generated and distributed offline +# by a trusted certificate authority (CA) and not by utilizing +# this mode. + +#dbms.ssl.policy.default.allow_key_generation=false + +# Enabling this makes it so that this policy ignores the contents +# of the trusted_dir and simply resorts to trusting everything. +# +# Use of this mode is discouraged. It would offer encryption but no security. + +#dbms.ssl.policy.default.trust_all=false + +# The private key for the default SSL policy. 
By default a file +# named private.key is expected under the base directory of the policy. +# It is mandatory that a key can be found or generated. + +#dbms.ssl.policy.default.private_key= + +# The private key for the default SSL policy. By default a file +# named public.crt is expected under the base directory of the policy. +# It is mandatory that a certificate can be found or generated. + +#dbms.ssl.policy.default.public_certificate= + +# The certificates of trusted parties. By default a directory named +# 'trusted' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). +# +# To enforce client authentication client_auth must be set to 'require'! + +#dbms.ssl.policy.default.trusted_dir= + +# Client authentication setting. Values: none, optional, require +# The default is to require client authentication. +# +# Servers are always authenticated unless explicitly overridden +# using the trust_all setting. In a mutual authentication setup this +# should be kept at the default of require and trusted certificates +# must be installed in the trusted_dir. + +#dbms.ssl.policy.default.client_auth=require + +# It is possible to verify the hostname that the client uses +# to connect to the remote server. In order for this to work, the server public +# certificate must have a valid CN and/or matching Subject Alternative Names. + +# Note that this is irrelevant on host side connections (sockets receiving +# connections). + +# To enable hostname verification client side on nodes, set this to true. + +#dbms.ssl.policy.default.verify_hostname=false + +# A comma-separated list of allowed TLS versions. +# By default only TLSv1.2 is allowed. + +#dbms.ssl.policy.default.tls_versions= + +# A comma-separated list of allowed ciphers. +# The default ciphers are the defaults of the JVM platform. 
+ +#dbms.ssl.policy.default.ciphers= + +#***************************************************************** +# Logging configuration +#***************************************************************** + +# To enable HTTP logging, uncomment this line +#dbms.logs.http.enabled=true + +# Number of HTTP logs to keep. +#dbms.logs.http.rotation.keep_number=5 + +# Size of each HTTP log that is kept. +#dbms.logs.http.rotation.size=20m + +# To enable GC Logging, uncomment this line +#dbms.logs.gc.enabled=true + +# GC Logging Options +# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information. +#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution + +# For Java 9 and newer GC Logging Options +# see https://docs.oracle.com/javase/10/tools/java.htm#JSWOR-GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5 +#dbms.logs.gc.options=-Xlog:gc*,safepoint,age*=trace + +# Number of GC logs to keep. +#dbms.logs.gc.rotation.keep_number=5 + +# Size of each GC log that is kept. +#dbms.logs.gc.rotation.size=20m + +# Log level for the debug log. One of DEBUG, INFO, WARN and ERROR. Be aware that logging at DEBUG level can be very verbose. +#dbms.logs.debug.level=INFO + +# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k", +# "m" or "g". +#dbms.logs.debug.rotation.size=20m + +# Maximum number of history files for the internal log. +#dbms.logs.debug.rotation.keep_number=7 + +#***************************************************************** +# Miscellaneous configuration +#***************************************************************** + +# Enable this to specify a parser other than the default one. +#cypher.default_language_version=2.3 + +# Determines if Cypher will allow using file URLs when loading data using +# `LOAD CSV`. 
Setting this value to `false` will cause Neo4j to fail `LOAD CSV` +# clauses that load data from the file system. +#dbms.security.allow_csv_import_from_file_urls=true + + +# Value of the Access-Control-Allow-Origin header sent over any HTTP or HTTPS +# connector. This defaults to '*', which allows broadest compatibility. Note +# that any URI provided here limits HTTP/HTTPS access to that URI only. +#dbms.security.http_access_control_allow_origin=* + +# Value of the HTTP Strict-Transport-Security (HSTS) response header. This header +# tells browsers that a webpage should only be accessed using HTTPS instead of HTTP. +# It is attached to every HTTPS response. Setting is not set by default so +# 'Strict-Transport-Security' header is not sent. Value is expected to contain +# directives like 'max-age', 'includeSubDomains' and 'preload'. +#dbms.security.http_strict_transport_security= + +# Retention policy for transaction logs needed to perform recovery and backups. +dbms.tx_log.rotation.retention_policy=1 days + +# Only allow read operations from this Neo4j instance. This mode still requires +# write access to the directory for lock purposes. +#dbms.read_only=false + +# Comma separated list of JAX-RS packages containing JAX-RS resources, one +# package name for each mountpoint. The listed package names will be loaded +# under the mountpoints specified. Uncomment this line to mount the +# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from +# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of +# http://localhost:7474/examples/unmanaged/helloworld/{nodeId} +#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged + +# A comma separated list of procedures and user defined functions that are allowed +# full access to the database through unsupported/insecure internal APIs. 
+#dbms.security.procedures.unrestricted=my.extensions.example,my.procedures.* + +# A comma separated list of procedures to be loaded by default. +# Leaving this unconfigured will load all procedures found. +#dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.* + +#******************************************************************** +# JVM Parameters +#******************************************************************** + +# G1GC generally strikes a good balance between throughput and tail +# latency, without too much tuning. +dbms.jvm.additional=-XX:+UseG1GC + +# Have common exceptions keep producing stack traces, so they can be +# debugged regardless of how often logs are rotated. +dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow + +# Make sure that `initmemory` is not only allocated, but committed to +# the process, before starting the database. This reduces memory +# fragmentation, increasing the effectiveness of transparent huge +# pages. It also reduces the possibility of seeing performance drop +# due to heap-growing GC events, where a decrease in available page +# cache leads to an increase in mean IO response time. +# Try reducing the heap memory, if this flag degrades performance. +dbms.jvm.additional=-XX:+AlwaysPreTouch + +# Trust that non-static final fields are really final. +# This allows more optimizations and improves overall performance. +# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or +# serialization to change the value of final fields! +dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions +dbms.jvm.additional=-XX:+TrustFinalNonStaticFields + +# Disable explicit garbage collection, which is occasionally invoked by the JDK itself. +dbms.jvm.additional=-XX:+DisableExplicitGC + +# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and +# jmx.password files are required. 
+# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords, +# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'. +# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html +# On Unix based systems the jmx.password file needs to be owned by the user that will run the server, +# and have permissions set to 0600. +# For details on setting these file permissions on Windows see: +# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637 +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access + +# Some systems cannot discover host name automatically, and need this line configured: +#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME + +# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes. +# This is to protect the server from any potential passive eavesdropping. +dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048 + +# This mitigates a DDoS vector. +dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +# This filter prevents deserialization of arbitrary objects via java object serialization, addressing potential vulnerabilities. +# By default this filter whitelists all neo4j classes, as well as classes from the hazelcast library and the java standard library. +# These defaults should only be modified by expert users! 
+# For more details (including filter syntax) see: https://openjdk.java.net/jeps/290 +#dbms.jvm.additional=-Djdk.serialFilter=java.**;org.neo4j.**;com.neo4j.**;com.hazelcast.**;net.sf.ehcache.Element;com.sun.proxy.*;org.openjdk.jmh.**;!* + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. +# Please uninstall the service before modifying this section. The +# service can then be reinstalled. + +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=tarball +metrics.enabled=true +metrics.neo4j.enabled=true +metrics.neo4j.tx.enabled=true +metrics.neo4j.pagecache.enabled=true +metrics.neo4j.counts.enabled=true +metrics.neo4j.network.enabled=true +dbms.unmanaged_extension_classes=org.neo4j.graphql=/graphql diff --git a/ansible/roles/community-neo4j/templates/neo4j.service.j2 b/ansible/roles/community-neo4j/templates/neo4j.service.j2 new file mode 100644 index 000000000..a829bd81a --- /dev/null +++ b/ansible/roles/community-neo4j/templates/neo4j.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=Neo4j Graph Database +After=network-online.target +Wants=network-online.target + +[Service] +ExecStart=/var/lib/neo4j/bin/neo4j console +Restart=on-failure +User=neo4j +Group=neo4j +Environment="NEO4J_CONF=/var/lib/neo4j/conf" "NEO4J_HOME=/var/lib/neo4j" +LimitNOFILE=60000 +TimeoutSec=120 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/community-neo4j/templates/newrelic-infra.yml.j2 
b/ansible/roles/community-neo4j/templates/newrelic-infra.yml.j2 new file mode 100644 index 000000000..6850bca74 --- /dev/null +++ b/ansible/roles/community-neo4j/templates/newrelic-infra.yml.j2 @@ -0,0 +1,6 @@ +license_key: {{ newrelic_license_key }} +log_file: /var/log/newrelic-infra/newrelic-infra.log +display_name: {{ collector_name }} +collector_url: {{ newrelic_collector_url }} +identity_url: {{ newrelic_identity_url }} +command_channel_url: {{ newrelic_command_channel_url }} diff --git a/ansible/roles/community-neo4j/tests/inventory b/ansible/roles/community-neo4j/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/community-neo4j/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/community-neo4j/tests/test.yml b/ansible/roles/community-neo4j/tests/test.yml new file mode 100644 index 000000000..ba5c658c4 --- /dev/null +++ b/ansible/roles/community-neo4j/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - neo4j \ No newline at end of file diff --git a/ansible/roles/community-neo4j/vars/main.yml b/ansible/roles/community-neo4j/vars/main.yml new file mode 100644 index 000000000..b531af7dd --- /dev/null +++ b/ansible/roles/community-neo4j/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for neo4j diff --git a/ansible/roles/ctdc-pipeline/.travis.yml b/ansible/roles/ctdc-pipeline/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory 
--syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/ctdc-pipeline/README.md b/ansible/roles/ctdc-pipeline/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/ctdc-pipeline/defaults/main.yml b/ansible/roles/ctdc-pipeline/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/ctdc-pipeline/handlers/main.yml b/ansible/roles/ctdc-pipeline/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/ctdc-pipeline/meta/main.yml b/ansible/roles/ctdc-pipeline/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/ctdc-pipeline/tasks/main.yml b/ansible/roles/ctdc-pipeline/tasks/main.yml new file mode 100644 index 000000000..4e23438ed --- /dev/null +++ b/ansible/roles/ctdc-pipeline/tasks/main.yml @@ -0,0 +1,47 @@ +--- +- name: run npm install in {{workspace}}/src/main/frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/" + loop: + - npm install + - npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + +- name: run npm install and build in {{workspace}}/src/main/frontend/node_modules/mui-custom-datatables + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/node_modules/mui-custom-datatables" + loop: + - npm install + - npm run build + +- name: run npm build in frontend + command: npm run-script build + args: + chdir: "{{workspace}}/src/main/frontend" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy CTDC-0.0.1.war to ROOT.war + copy: + remote_src: yes + src: "{{workspace}}/target/CTDC-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + + + + + diff --git 
a/ansible/roles/ctdc-pipeline/templates/application.properties.j2 b/ansible/roles/ctdc-pipeline/templates/application.properties.j2 new file mode 100644 index 000000000..d75fa1487 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/templates/application.properties.j2 @@ -0,0 +1,26 @@ +spring.mvc.throw-exception-if-no-handler-found=true +spring.data.neo4j.username={{neo4j_user}} +spring.data.neo4j.password={{neo4j_password}} +neo4j.jdbc.server=jdbc:neo4j:bolt://{{neo4j_ip}} +graphql.schema=graphql/person.graphqls, graphql/icdc.graphqls +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.java.driver.server=bolt://{{neo4j_ip}}:7687 +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +api.version=v1 +session.timeout=30 +data.model.version = 1 +allow_grapqh_query = true +allow_graphql_mutation =false +fence.client_id={{fence_id}} +fence.client_credential={{fence_credential}} +fence.redirect_url=https://{{fence_url}}/ +fence.url=https://nci-crdc-staging.datacommons.io/ +fence.exchange_token_url=https://nci-crdc-staging.datacommons.io/user/oauth2/token +fence.log_out_url = https://nci-crdc-staging.datacommons.io/user/logout +neo4j_query.getversion= query { numberOfStudies }; +graphql_api.version = 1.0.0 +rest_api.version =1.0.0 +front_end.version =1.0.0 \ No newline at end of file diff --git a/ansible/roles/ctdc-pipeline/tests/inventory b/ansible/roles/ctdc-pipeline/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/ctdc-pipeline/tests/test.yml b/ansible/roles/ctdc-pipeline/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/ctdc-pipeline/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git 
a/ansible/roles/ctdc-pipeline/vars/main.yml b/ansible/roles/ctdc-pipeline/vars/main.yml new file mode 100644 index 000000000..388dbe3f8 --- /dev/null +++ b/ansible/roles/ctdc-pipeline/vars/main.yml @@ -0,0 +1,10 @@ +--- +# vars file for cicd +workspace: "{{ lookup('env','WORKSPACE') }}" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +bearer: "{{ lookup('env','BEARER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASSWORD') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +fence_id: "{{ lookup('env','FENCE_ID') }}" +fence_credential: "{{ lookup('env','FENCE_CREDENTIAL') }}" +fence_url: "{{ lookup('env','FENCE_URL') }}" diff --git a/ansible/roles/ctdc/README.md b/ansible/roles/ctdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/ctdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/ctdc/defaults/main.yml b/ansible/roles/ctdc/defaults/main.yml new file mode 100644 index 000000000..406cde47f --- /dev/null +++ b/ansible/roles/ctdc/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for k9dc diff --git a/ansible/roles/k9dc/handlers/main.yml b/ansible/roles/ctdc/handlers/main.yml similarity index 100% rename from ansible/roles/k9dc/handlers/main.yml rename to ansible/roles/ctdc/handlers/main.yml diff --git a/ansible/roles/ctdc/meta/main.yml b/ansible/roles/ctdc/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/ctdc/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. 
Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/ctdc/tasks/main.yml b/ansible/roles/ctdc/tasks/main.yml new file mode 100644 index 000000000..03e9c3828 --- /dev/null +++ b/ansible/roles/ctdc/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: open port 8080 + firewalld: + port: 8080/tcp + zone: public + immediate: yes + permanent: yes + state: enabled + +- name: create tomcat group + group: + name: tomcat + gid: 3001 + state: present + +- name: create tomcat user + user: + name: tomcat + uid: 3001 + groups: tomcat,docker + append: yes + +- name: create ctdc deployments directory + file: + path: "{{ item }}" + state: directory + owner: tomcat + group: tomcat + loop: + - "{{ deployments }}" + - "{{ docker_home }}" + - "{{ ctdc_home }}/logs" + +- name: copy docker files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: tomcat + group: tomcat + loop: + - {src: 'docker-compose.yml.j2',dest: '{{docker_home}}/docker-compose.yml'} + +- name: start ctdc + command: docker-compose up -d + args: + chdir: "{{ docker_home }}" + warn: no + tags: + - cloudone + +- name: start the ctdc + docker_compose: + project_src: "{{ docker_home }}" + state: present + tags: + - sandbox \ No newline at end of file diff --git a/ansible/roles/ctdc/templates/docker-compose.yml.j2 b/ansible/roles/ctdc/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..9256d055e --- /dev/null +++ b/ansible/roles/ctdc/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + match: + container_name: ctdc + image: cbiitssrepo/tomcat9 + ports: + - 8080:8080 + volumes: + - {{ ctdc_home }}/logs:/usr/local/tomcat/logs + restart: always + environment: + - NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} + - NEW_RELIC_APP_NAME={{ app_name }} + - NEW_RELIC_HOST=gov-collector.newrelic.com + diff --git a/ansible/roles/ctdc/tests/inventory b/ansible/roles/ctdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ 
b/ansible/roles/ctdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/k9dc/tests/test.yml b/ansible/roles/ctdc/tests/test.yml similarity index 100% rename from ansible/roles/k9dc/tests/test.yml rename to ansible/roles/ctdc/tests/test.yml diff --git a/ansible/roles/ctdc/vars/main.yml b/ansible/roles/ctdc/vars/main.yml new file mode 100644 index 000000000..f4a74ecc7 --- /dev/null +++ b/ansible/roles/ctdc/vars/main.yml @@ -0,0 +1,15 @@ +--- +# vars file for ctdc +docker_home: /local/content/docker +ctdc_home: /local/content/ctdc +deployments: /local/content/canine-data +container_name: ctdc +app_name: "{{env}}-ctdc" +collector_name: "{{ env }}-ctdc" +newrelic_license_key: "{{ newrelic_key }}" +additional_logs: + - name: "{{ env }} ctdc Logs" + description: "{{ env }} ctdc logs" + category: "{{env }}/app/ctdc" + path: "/local/content/ctdc/logs/*.log" + filters: "" \ No newline at end of file diff --git a/ansible/roles/data-dictionary/.travis.yml b/ansible/roles/data-dictionary/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/data-dictionary/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/data-dictionary/README.md b/ansible/roles/data-dictionary/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/data-dictionary/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of 
the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/data-dictionary/defaults/main.yml b/ansible/roles/data-dictionary/defaults/main.yml new file mode 100644 index 000000000..cd63f0d9b --- /dev/null +++ b/ansible/roles/data-dictionary/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +prod: prod diff --git a/ansible/roles/data-dictionary/files/inject.template.js b/ansible/roles/data-dictionary/files/inject.template.js new file mode 100644 index 000000000..da7d90ee7 --- /dev/null +++ b/ansible/roles/data-dictionary/files/inject.template.js @@ -0,0 +1,4 @@ +window.injectedEnv = { + REACT_APP_MODEL_URL: '${REACT_APP_MODEL_URL}', + REACT_APP_MODEL_PROPS_URL: '${REACT_APP_MODEL_PROPS_URL}', +}; diff --git a/ansible/roles/data-dictionary/files/nginx-entrypoint.sh b/ansible/roles/data-dictionary/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/data-dictionary/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/data-dictionary/handlers/main.yml b/ansible/roles/data-dictionary/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/data-dictionary/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/data-dictionary/meta/main.yml b/ansible/roles/data-dictionary/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/data-dictionary/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: 
http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/data-dictionary/tasks/build.yml b/ansible/roles/data-dictionary/tasks/build.yml new file mode 100644 index 000000000..ef227c22e --- /dev/null +++ b/ansible/roles/data-dictionary/tasks/build.yml @@ -0,0 +1,53 @@ +--- + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx.conf' + dest: '{{workspace}}/{{project}}/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/{{project}}/nginx-entrypoint.sh" + mode: 0755 + +- name: run npm install in {{workspace}}/{{project}} + command: "{{item}}" + args: + chdir: "{{workspace}}/{{project}}" + loop: + - npm install + - npm run build + +- name: copy env to dist + copy: + src: inject.template.js + dest: "{{workspace}}/{{project}}/dist/inject.template.js" + mode: 0755 + +- name: build cbiitssrepo/{{project | lower}} image + docker_image: + build: + path: "{{workspace}}/{{project}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/ccdc-frontend-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/{{project | lower}} + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{ project | lower }} image + docker_image: + name: "cbiitssrepo/{{ project | lower }}:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/{{ project | lower }}:latest + force_tag: yes + push: yes + source: local \ No newline at end of file diff --git a/ansible/roles/data-dictionary/tasks/deploy.yml b/ansible/roles/data-dictionary/tasks/deploy.yml new file mode 100644 index 000000000..dd0ff260b --- /dev/null +++ b/ansible/roles/data-dictionary/tasks/deploy.yml @@ -0,0 +1,105 @@ +--- + +- name: create task definition - {{project}} + ecs_taskdefinition: + containers: + - name: 
dictionary + essential: true + image: "cbiitssrepo/{{project | lower}}:{{frontend_version}}" + environment: + - name: REACT_APP_MODEL_URL + value: "https://raw.githubusercontent.com/CBIIT/icdc-model-tool/master/model-desc/icdc-model.yml" + - name: REACT_APP_MODEL_PROPS_URL + value: "https://raw.githubusercontent.com/CBIIT/icdc-model-tool/master/model-desc/icdc-model-props.yml" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ newrelic_license_key }}" + - name: NEW_RELIC_APP_NAME + value: "{{ app_name }}-frontend" + - name: NEW_RELIC_DISTRIBUTED_TRACING_ENABLED + value: "true" + - name: NEW_RELIC_HOST + value: "gov-collector.newrelic.com" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + portMappings: + - containerPort: "81" + hostPort: "81" + logConfiguration: + logDriver: syslog + options: + syslog-address: tcp://{{ syslog_host }}:514 + tag: "{{ app_name }}-dictionary" + syslog-format: "rfc5424micro" + network_mode: bridge + family: "{{stack_name }}-{{tier}}-dictionary" + state: present + memory: '512' + cpu: '128' + region: "{{region}}" + register: task_output + + +############################################################################################################################ + +# Task Definition Queries + +############################################################################################################################ + +- name: query task definition - {{project | lower}} + ecs_taskdefinition_info: + task_definition: "{{stack_name }}-{{tier}}-dictionary" + region: "{{region}}" + register: task_dictionary + + + + +############################################################################################################################ + +# Service Queries + +############################################################################################################################ + + +- name: query ecs service + ecs_service_info: + cluster: "{{stack_name }}-{{tier}}" + service: "{{stack_name }}-{{tier}}-dictionary" + details: true + region: 
"{{region}}" + register: service_dictionary + + +############################################################################################################################ + +- name: set facts + set_fact: + dictionary_url: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/{% endif %}" + dictionary_revision: "{{task_dictionary.revision}}" + task_dictionary_name: "{{task_dictionary.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + # lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + + +############################################################################################################################ + +# Update Services + +############################################################################################################################ + +- name: update frontend service + ecs_service: + state: present + name: "{{stack_name }}-{{tier}}-frontend" + cluster: "{{stack_name }}-{{tier}}" + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output diff --git a/ansible/roles/data-dictionary/tasks/main.yml b/ansible/roles/data-dictionary/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/data-dictionary/tasks/redis.yml b/ansible/roles/data-dictionary/tasks/redis.yml new file mode 100644 index 000000000..46787e625 --- /dev/null +++ b/ansible/roles/data-dictionary/tasks/redis.yml @@ -0,0 +1,5 @@ +- name: confirm redis redis_host + debug: + msg: "{{redis_host[tier]}}" +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL ASYNC" | redis-cli -h {{ redis_host[tier]}} -p 6379 -c diff --git 
a/ansible/roles/data-dictionary/templates/env.j2 b/ansible/roles/data-dictionary/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/data-dictionary/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/data-dictionary/templates/nginx-config.yml.j2 b/ansible/roles/data-dictionary/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/data-dictionary/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/data-dictionary/tests/inventory b/ansible/roles/data-dictionary/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/data-dictionary/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/data-dictionary/tests/test.yml b/ansible/roles/data-dictionary/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/data-dictionary/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/data-dictionary/vars/main.yml b/ansible/roles/data-dictionary/vars/main.yml new file mode 100644 index 000000000..737c45f27 --- /dev/null +++ b/ansible/roles/data-dictionary/vars/main.yml @@ -0,0 +1,17 @@ +--- 
+# vars file for cicd +stack_name: ccdc +platform: aws +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: "{{stack_name}}-{{platform}}-{{tier}}" +project: visualiser-standalone +frontend_version: "{{ lookup('env','FE_VERSION') }}" +backend_version: "{{ lookup('env','BE_VERSION') }}" \ No newline at end of file diff --git a/ansible/roles/data-processing/tasks/bento-ctdc-data-loader.yml b/ansible/roles/data-processing/tasks/bento-ctdc-data-loader.yml new file mode 100644 index 000000000..41fccf6e5 --- /dev/null +++ b/ansible/roles/data-processing/tasks/bento-ctdc-data-loader.yml @@ -0,0 +1,73 @@ +--- + +- name: set model files location for icdc + set_fact: + model_file1: "{{workspace}}/icdc-model/model-desc/icdc-model.yml" + model_file2: "{{workspace}}/icdc-model/model-desc/icdc-model-props.yml" + property_file: "config/props-icdc-pmvp.yml" + when: project == "icdc" + +- name: set model files location for bento + set_fact: + model_file1: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_file.yaml" + model_file2: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_properties.yaml" + property_file: "config/props-bento-ext.yml" + when: project == "bento" + +- name: set model files location for ctdc + set_fact: + model_file1: "{{workspace}}/ctdc-model/model-desc/ctdc_model_file.yaml" + model_file2: "{{workspace}}/ctdc-model/model-desc/ctdc_model_properties_file.yaml" + property_file: "config/props-ctdc.yml" + when: project == 
"ctdc" + +- name: Check if tmp directory exist + stat: + path: "{{workspace}}/tmp" + register: stat_result + +- name: Check if tmp {{s3_folder}} exist + stat: + path: "{{workspace}}/{{s3_folder}}" + register: s3folder_result + +- name: remove the tmp if exists + file: + path: "{{workspace}}/tmp" + state: absent + when: stat_result.stat.exists + +- name: remove the {{s3_folder}} if exists + file: + path: "{{workspace}}/{{s3_folder}}" + state: absent + when: stat_result.stat.exists + +- name: update settings + template: + dest: "{{workspace}}/config/config.yml" + src: "{{workspace}}/config/config.yml.j2" + +- name: install python3 + yum: + name: python3 + state: installed + +- name: pip install requirements + pip: + requirements: "{{workspace}}/requirements.txt" + executable: pip3 + + +- name: loader data + shell: + cmd: > + python3 + loader.py + {{workspace}}/config/config.yml + chdir: "{{workspace}}" + register: data_loader + +- name: show dataloader output + debug: + msg: "{{data_loader}}" diff --git a/ansible/roles/data-processing/tasks/bento-data-loader.yml b/ansible/roles/data-processing/tasks/bento-data-loader.yml new file mode 100644 index 000000000..90354df00 --- /dev/null +++ b/ansible/roles/data-processing/tasks/bento-data-loader.yml @@ -0,0 +1,125 @@ +--- +- name: gather instance facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Name": "{{project}}-{{tier}}-neo4j-4" + "instance-state-name": running + "tag:Environment": "{{tier}}" + register: neo4j + +- name: set instance name + set_fact: + neo4j_ip: "{{ neo4j.instances[0].network_interfaces[0].private_ip_address }}" + +- name: show db + debug: + msg: "{{neo4j_ip}}" + +- name: set model files location for icdc + set_fact: + model_file1: "{{workspace}}/icdc-model/model-desc/icdc-model.yml" + model_file2: "{{workspace}}/icdc-model/model-desc/icdc-model-props.yml" + property_file: "config/props-icdc-pmvp.yml" + when: project == "icdc" + +- name: set model files location for bento + set_fact: + 
model_file1: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_file.yaml" + model_file2: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_properties.yaml" + property_file: "config/props-bento-ext.yml" + when: project == "bento" + +- name: set model files location for github + set_fact: + model_file1: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_file.yaml" + model_file2: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_properties.yaml" + property_file: "config/props-bento-ext.yml" + when: project == "github-actions" + +- name: set model files location for ctdc + set_fact: + model_file1: "{{workspace}}/ctdc-model/model-desc/ctdc_model_file.yaml" + model_file2: "{{workspace}}/ctdc-model/model-desc/ctdc_model_properties_file.yaml" + property_file: "config/props-ctdc.yml" + when: project == "ctdc" + +- name: set model files location for gmb + set_fact: + model_file1: "{{workspace}}/gmb-model/model-desc/000048_Model.yml" + model_file2: "{{workspace}}/gmb-model/model-desc/000048_Model_Props.yml" + property_file: "config/props-gmb-mock.yml" + when: project == "gmb" + + +- name: set model files location for pcdc + set_fact: + model_file1: "{{workspace}}/c3dc-model/model-desc/c3dc_Model.yml" + model_file2: "{{workspace}}/c3dc-model/model-desc/c3dc_Model_Props.yml" + property_file: "config/props-pcdc-mock.yml" + when: project == "c3dc" + +- name: set model files location for ccdi + set_fact: + model_file1: "{{workspace}}/ccdi-model/model-desc/ccdi_portal_model_file.yaml" + model_file2: "{{workspace}}/ccdi-model/model-desc/ccdi_portal_model_properties.yaml" + property_file: "{{workspace}}/ccdi-model/model-desc/props-ccdi-portal.yml" + when: project == "ccdi" + +- name: set model files location for ins + set_fact: + model_file1: "{{workspace}}/ins-model/data_loading/model-desc/ins_model_file.yaml" + model_file2: "{{workspace}}/ins-model/data_loading/model-desc/ins_model_properties.yaml" + property_file: 
"{{workspace}}/ins-model/data_loading/model-desc/props-ins.yml" + when: project == "ins" + +- name: Check if tmp directory exist + stat: + path: "{{workspace}}/tmp" + register: stat_result + +- name: Check if tmp {{s3_folder}} exist + stat: + path: "{{workspace}}/{{s3_folder}}" + register: s3folder_result + +- name: remove the tmp if exists + file: + path: "{{workspace}}/tmp" + state: absent + when: stat_result.stat.exists + +- name: remove the {{s3_folder}} if exists + file: + path: "{{workspace}}/{{s3_folder}}" + state: absent + when: stat_result.stat.exists + +- name: update settings + template: + dest: "{{workspace}}/config/config.yml" + src: "{{workspace}}/config/config.yml.j2" + +- name: install python3 + yum: + name: python3 + state: installed + +- name: pip install requirements + pip: + requirements: "{{workspace}}/requirements.txt" + executable: pip3 + + +- name: loader data + shell: + cmd: > + python3 + loader.py + {{workspace}}/config/config.yml + chdir: "{{workspace}}" + register: data_loader + +- name: show dataloader output + debug: + msg: "{{data_loader}}" diff --git a/ansible/roles/data-processing/tasks/bento-es-loader.yml b/ansible/roles/data-processing/tasks/bento-es-loader.yml new file mode 100644 index 000000000..33b463f64 --- /dev/null +++ b/ansible/roles/data-processing/tasks/bento-es-loader.yml @@ -0,0 +1,55 @@ +--- +- name: gather instance facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Name": "{{project}}-{{tier}}-neo4j-4" + "instance-state-name": running + "tag:Environment": "{{tier}}" + register: neo4j + +- name: set instance name + set_fact: + neo4j_ip: "{{ neo4j.instances[0].network_interfaces[0].private_ip_address }}" + +- name: set model files location for bento + set_fact: + model_file1: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_file.yaml" + model_file2: "{{workspace}}/bento-model/model-desc/bento_tailorx_model_properties.yaml" + property_file: "config/props-bento-ext.yml" + +- name: set about_file 
location for bento + set_fact: + about_file: "{{workspace}}/bento-frontend/src/content/{{ tier }}/aboutPagesContent.yaml" + +- name: update indices file + template: + dest: "{{workspace}}/config/es_indices_bento.yml" + src: "{{workspace}}/bento-backend/src/main/resources/yaml/es_indices_bento.yml" + +- name: update server config file + template: + dest: "{{workspace}}/config/es_loader_bento.yml" + src: "{{workspace}}/config/es_loader.yml.j2" + +- name: install python3 + yum: + name: python3 + state: installed + +- name: pip install requirements + pip: + requirements: "{{workspace}}/requirements.txt" + executable: pip3 + + +- name: load data + shell: + cmd: > + python3 es_loader.py config/es_indices_bento.yml config/es_loader_bento.yml + chdir: "{{workspace}}" + register: data_loader + +- name: show dataloader output + debug: + msg: "{{data_loader}}" diff --git a/ansible/roles/data-processing/tasks/ctdc-data-loader.yml b/ansible/roles/data-processing/tasks/ctdc-data-loader.yml new file mode 100644 index 000000000..b3663b79c --- /dev/null +++ b/ansible/roles/data-processing/tasks/ctdc-data-loader.yml @@ -0,0 +1,66 @@ +--- +- name: show project + debug: + msg: "{{project}}{{s3_folder}}{{wipe_db}}{{cheat_mode}}" + +- name: set model files location for ctdc + set_fact: + model_file1: "{{workspace}}/ctdc-model/model-desc/ctdc_model_file.yaml" + model_file2: "{{workspace}}/ctdc-model/model-desc/ctdc_model_properties_file.yaml" + property_file: "config/props-ctdc.yml" + +- name: Check if tmp directory exist + stat: + path: "{{workspace}}/tmp" + register: stat_result + +- name: Check if tmp {{s3_folder}} exist + stat: + path: "{{workspace}}/{{s3_folder}}" + register: s3folder_result + +- name: remove the tmp if exists + file: + path: "{{workspace}}/tmp" + state: absent + when: stat_result.stat.exists + +- name: remove the {{s3_folder}} if exists + file: + path: "{{workspace}}/{{s3_folder}}" + state: absent + when: stat_result.stat.exists + +- name: update settings + 
template: + dest: "{{workspace}}/config/config.yml" + src: "{{workspace}}/config/config.yml.j2" + +- name: install python3 + yum: + name: python3 + state: installed + +- name: pip install requirements + pip: + requirements: "{{workspace}}/requirements.txt" + executable: pip3 + +- name: pip install requests + pip: + name: urllib3 + executable: pip3 + state: latest + +- name: loader data + shell: + cmd: > + python3 + loader.py + {{workspace}}/config/config.yml + chdir: "{{workspace}}" + register: data_loader + +- name: show dataloader output + debug: + msg: "{{data_loader}}" diff --git a/ansible/roles/data-processing/tasks/ctdc-file-loader.yml b/ansible/roles/data-processing/tasks/ctdc-file-loader.yml new file mode 100644 index 000000000..d2b0b2f2a --- /dev/null +++ b/ansible/roles/data-processing/tasks/ctdc-file-loader.yml @@ -0,0 +1,52 @@ + +- name: Check if tmp directory exist + stat: + path: "{{workspace}}/tmp" + register: stat_result + +- name: remove the tmp if exists + file: + path: "{{workspace}}/tmp" + state: absent + when: stat_result.stat.exists == True + +- name: set facts + set_fact: + arm_id: "{{ arm_Id.split(',') }}" + phs_id: "{{phs_Id.split(',')}}" + bucket_name: "{{bucket_Name.split(',')}}" + + +# - name: create dict from arms_id and phps_id +# set_fact: +# match_Ids: "{{ dict(arm_Id.split(',') | zip(phs_Id.split(','))|zip(bucket_name.split(','))) }}" + +- name: copy config.json.j2 config.json + template: + remote_src: yes + src: "{{workspace}}/config/config.json.j2" + dest: "{{workspace}}/config/config.json" + +- name: get file content + include_vars: + file: "{{workspace}}/config/config.json" + name: json_file + +- name: show file + debug: + msg: "{{ json_file }}" + +- name: execute file loader script + shell: + cmd: > + /usr/bin/python3 + matchMain.py + {{workspace}}/config/config.json + chdir: "{{workspace}}" + register: file_loader + + +- name: show file_loader output + debug: + msg: "{{file_loader}}" + diff --git 
a/ansible/roles/data-processing/tasks/ctdc-file-validator.yml b/ansible/roles/data-processing/tasks/ctdc-file-validator.yml new file mode 100644 index 000000000..1d5168b22 --- /dev/null +++ b/ansible/roles/data-processing/tasks/ctdc-file-validator.yml @@ -0,0 +1,71 @@ + +#- name: Check if tmp directory exist +# stat: +# path: "{{workspace}}/tmp" +# register: stat_result + +#- name: remove the tmp if exists +# file: +# path: "{{workspace}}/tmp" +# state: absent +# when: stat_result.stat.exists == True + +- name: set facts + set_fact: + arm_id: "{{ arm_Id.split(',') }}" + phs_id: "{{phs_Id.split(',')}}" + bucket_name: "{{bucket_Name.split(',')}}" + + +# - name: create dict from arms_id and phps_id +# set_fact: +# match_Ids: "{{ dict(arm_Id.split(',') | zip(phs_Id.split(','))|zip(bucket_name.split(','))) }}" + +- name: copy config.json.j2 config.json + template: + remote_src: yes + src: "{{workspace}}/config/config.json.j2" + dest: "{{workspace}}/config/config.json" + +#- name: get config file content +# include_vars: +# file: "{{workspace}}/config/config.json" +# name: json_file + +#- name: show config file +# debug: +# msg: "{{ json_file }}" + +- name: execute metadata validator script + shell: + cmd: > + /usr/bin/python3 + match_metadata_validator.py + --config-file {{workspace}}/config/config.json + chdir: "{{workspace}}" + register: metadata_validator_out + +- name: show metadata validator output + debug: + msg: "{{metadata_validator_out}}" + +- name: get manifest file from S3 + aws_s3: + bucket: "{{s3_bucket}}" + object: "/{{manifest_file}}" + dest: "{{workspace}}/manifest.tsv" + mode: get + +- name: execute manifest validator script + shell: + cmd: > + /usr/bin/python3 + match_manifest_validator.py + --config-file {{workspace}}/config/config.json + {{workspace}}/manifest.tsv + chdir: "{{workspace}}" + register: manifest_validator_out + +- name: show manifest validator output + debug: + msg: "{{manifest_validator_out}}" \ No newline at end of file diff --git 
a/ansible/roles/data-processing/tasks/data-loader.yml b/ansible/roles/data-processing/tasks/data-loader.yml new file mode 100644 index 000000000..89c909f1f --- /dev/null +++ b/ansible/roles/data-processing/tasks/data-loader.yml @@ -0,0 +1,76 @@ + +- name: Check if tmp directory exist + stat: + path: "{{workspace}}/tmp" + register: stat_result + +- name: remove the tmp if exists + file: + path: "{{workspace}}/tmp" + state: absent + when: stat_result.stat.exists == True + +# - name: list the content of {{ data_bucket }} +# aws_s3: +# bucket: "{{data_bucket}}" +# prefix: "{{s3_folder}}" +# mode: list +# marker: "{{s3_folder}}" +# register: s3_content + +# - name: download all the files from {{s3_folder}} +# aws_s3: +# bucket: "{{data_bucket}}" +# object: "{{ item }}" +# dest: "{{ workspace }}/{{ item | basename }}" +# mode: get +# loop: "{{ s3_content.s3_keys }}" + +- name: copy config.sample.ini config.ini + copy: + remote_src: yes + src: "{{workspace}}/config/config.example.ini" + dest: "{{workspace}}/config/config.ini" + +- name: execute dataloader script + shell: + cmd: > + /usr/bin/python3 + loader.py -i bolt://{{ neo4j_ip }}:7687 + --config-file config/config.ini + --prop-file config/props-icdc.yml + -p {{ neo4j_password }} + -s tests/data/icdc-model.yml + -s tests/data/icdc-model-props.yml + -c -b {{ data_bucket }} + -f {{ s3_folder }} tmp + chdir: "{{workspace}}" + register: data_loader + when: wipe_db == "no" + +- name: show dataloader output + debug: + msg: "{{data_loader}}" + +- name: wipe out database then execute dataloader script + shell: + cmd: > + /usr/bin/python3 + loader.py -i bolt://{{ neo4j_ip }}:7687 + --config-file config/config.ini + --prop-file config/props-icdc.yml + -p {{ neo4j_password }} + -s tests/data/icdc-model.yml + -s tests/data/icdc-model-props.yml + -c -b {{ data_bucket }} + -f {{ s3_folder }} tmp + --wipe-db + --yes + chdir: "{{workspace}}" + register: data_loader + when: wipe_db == "yes" + +- name: show dataloader output + debug: + 
msg: "{{data_loader}}" + diff --git a/ansible/roles/data-processing/tasks/db-backup.yml b/ansible/roles/data-processing/tasks/db-backup.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/data-processing/tasks/icdc-data-dump-push.yml b/ansible/roles/data-processing/tasks/icdc-data-dump-push.yml new file mode 100644 index 000000000..d40e3cee9 --- /dev/null +++ b/ansible/roles/data-processing/tasks/icdc-data-dump-push.yml @@ -0,0 +1,19 @@ +--- +# tasks file for neo4j-dump + +- name: ensure that remote workspace exists + file: + path: "{{ remote_workspace }}-{{ tier }}" + state: directory + +- name: verify file is present + stat: + path: "{{ remote_workspace }}-{{ tier }}/{{ dump_file_name }}.dump" + register: file_status + +- name: upload dump file to s3 + aws_s3: + bucket: "{{ s3_bucket_name }}" + object: "/{{ s3_folder_name }}/{{ dump_file_name }}-{{ timestamp }}.dump" + src: "{{ remote_workspace }}-{{ tier }}/{{ dump_file_name }}.dump" + mode: put \ No newline at end of file diff --git a/ansible/roles/data-processing/tasks/icdc-data-dump.yml b/ansible/roles/data-processing/tasks/icdc-data-dump.yml new file mode 100644 index 000000000..922b3edd3 --- /dev/null +++ b/ansible/roles/data-processing/tasks/icdc-data-dump.yml @@ -0,0 +1,37 @@ +--- +# tasks file for neo4j-loader + +- name: ensure backup directory exists + file: + path: "{{ backup_directory }}" + state: directory + owner: neo4j + group: neo4j + +- name: Remove old backup files if found + file: path="{{ backup_directory }}/{{ dump_file_name }}.dump" state=absent + +- name: stop neo4j + service: + name: neo4j + state: stopped + +- name: backup neo4j db + command: "neo4j-admin dump --database=neo4j --to={{ backup_directory }}/{{ dump_file_name }}.dump" + become_user: neo4j + +- name: verify file is present + stat: + path: "{{ backup_directory }}/{{ dump_file_name }}.dump" + register: file_status + +- name: start neo4j + service: + name: neo4j + state: started + +- name: ansible copy 
file from remote to local + fetch: + src: "{{ backup_directory }}/{{ dump_file_name }}.dump" + dest: "{{ remote_workspace }}-{{ tier }}/" + flat: yes \ No newline at end of file diff --git a/ansible/roles/data-processing/tasks/icdc-data-loader.yml b/ansible/roles/data-processing/tasks/icdc-data-loader.yml new file mode 100644 index 000000000..2b538f042 --- /dev/null +++ b/ansible/roles/data-processing/tasks/icdc-data-loader.yml @@ -0,0 +1,82 @@ +--- +- name: show project + debug: + msg: "{{project}}{{s3_folder}}{{wipe_db}}{{cheat_mode}}" + +- name: set model files location for icdc + set_fact: + model_file1: "{{workspace}}/icdc-model/model-desc/icdc-model.yml" + model_file2: "{{workspace}}/icdc-model/model-desc/icdc-model-props.yml" + #property_file: "{{workspace}}/config/props-icdc-pmvp.yml" + property_file: "config/props-icdc-pmvp.yml" + when: project == "icdc" + +# - name: set model files location for bento +# set_fact: +# model_file1: "{{workspace}}/bento-model/model-desc/bento_model_file.yaml" +# model_file2: "{{workspace}}/bento-model/model-desc/bento_model_properties_file.yaml" +# property_file: "config/props-bento-ext.yml" +# when: project == "bento" + +# - name: set model files location for ctdc +# set_fact: +# model_file1: "{{workspace}}/ctdc-model/model-desc/ctdc_model_file.yaml" +# model_file2: "{{workspace}}/ctdc-model/model-desc/ctdc_model_properties_file.yaml" +# property_file: "config/props-ctdc.yml" +# when: project == "ctdc" + +- name: Check if tmp directory exist + stat: + path: "{{workspace}}/tmp" + register: stat_result + +- name: Check if tmp {{s3_folder}} exist + stat: + path: "{{workspace}}/{{s3_folder}}" + register: s3folder_result + +- name: remove the tmp if exists + file: + path: "{{workspace}}/tmp" + state: absent + when: stat_result.stat.exists + +- name: remove the {{s3_folder}} if exists + file: + path: "{{workspace}}/{{s3_folder}}" + state: absent + when: stat_result.stat.exists + +- name: update settings + template: + dest: 
"{{workspace}}/config/config.yml" + src: "{{workspace}}/config/config.yml.j2" + +- name: install python3 + yum: + name: python3 + state: installed + +- name: pip install requirements + pip: + requirements: "{{workspace}}/requirements.txt" + executable: pip3 + +- name: pip install requests + pip: + name: urllib3 + executable: pip3 + state: latest + +- name: loader data + shell: + cmd: > + python3 + loader.py + {{workspace}}/config/config.yml + chdir: "{{workspace}}" + register: data_loader + +- name: show dataloader output + debug: + msg: "{{data_loader}}" diff --git a/ansible/roles/data-processing/tasks/schema.yml b/ansible/roles/data-processing/tasks/schema.yml new file mode 100644 index 000000000..8cd156178 --- /dev/null +++ b/ansible/roles/data-processing/tasks/schema.yml @@ -0,0 +1,18 @@ +- name: clone bento-custodian + git: + repo: 'https://github.com/CBIIT/bento-backend' + dest: "{{workspace}}/bento-backend" + +- name: post schemas + uri: + url: http://{{neo4j_ip}}:7474/graphql/idl/ + method: POST + body: "{{ lookup('file','{{workspace}}/bento-backend/src/main/resources/graphql/bento-extended.graphql') }}" + headers: + Accept: "application/json" + Authorization: "{{bearer}}" + register: schema + +- name: schema output + debug: + msg: "{{schema}}" \ No newline at end of file diff --git a/ansible/roles/data-processing/templates/icdc-config.yml.j2 b/ansible/roles/data-processing/templates/icdc-config.yml.j2 new file mode 100644 index 000000000..0a1e96ea6 --- /dev/null +++ b/ansible/roles/data-processing/templates/icdc-config.yml.j2 @@ -0,0 +1,42 @@ +Config: + temp_folder: tmp + backup_folder: /tmp/data-loader-backups + + neo4j: + # Location of Neo4j server, e.g., bolt://127.0.0.1:7687 + uri: bolt://{{neo4j_ip}}:7687 + # Neo4j username + user: neo4j + # Neo4j password + password: {{neo4j_password}} + + # Schema files' locations + schema: + - {{workspace}}/{{model_repo}}/model-desc/{{model_file1}} + - {{workspace}}/{{model_repo}}/model-desc/{{model_file2}} + + 
#Property file location + prop_file: {{workspace}}/config/{{property_file}} + + # Skip validations, aka. Cheat Mode + cheat_mode: {{cheat_mode}} + # Validations only, skip loading + dry_run: false + # Wipe out database before loading, you'll lose all data! + wipe_db: {{wipe_db}} + # Skip backup step + no_backup: false + # Automatically confirm deletion and database wiping (without asking user to confirm) + no_confirmation: true + # Max violations to display, default is 10 + max_violations: 10 + no_parents: false + + # S3 bucket name, if you are loading from an S3 bucket + s3_bucket: {{data_bucket}} + # S3 folder for dataset + s3_folder: {{s3_folder}} + # Loading mode, can be UPSERT_MODE, NEW_MODE or DELETE_MODE, default is UPSERT_MODE + loading_mode: UPSERT_MODE + # Location of dataset + dataset: "{{s3_folder}}" diff --git a/ansible/roles/data-processing/vars/icdc_data_dump.yml b/ansible/roles/data-processing/vars/icdc_data_dump.yml new file mode 100644 index 000000000..c494c306e --- /dev/null +++ b/ansible/roles/data-processing/vars/icdc_data_dump.yml @@ -0,0 +1,14 @@ +--- +# vars file for neo4j-loader +backup_directory: /backups +remote_workspace: /tmp/neo4j +s3_bucket_name: 'nci-cbiit-caninedatacommons-dev' +dump_file_name: "{{ lookup('env','DUMP_FILE') }}" +s3_folder_name: 'dump_files' +timestamp: "{{ lookup('pipe','date +%Y-%m-%d-%H-%M-%S') }}" + +tier: "{{ lookup('env','TIER') }}" +region: us-east-1 + +neo4j_db_name: neo4j +neo4j_admin_cmd: 'neo4j-admin' \ No newline at end of file diff --git a/ansible/roles/data-processing/vars/main.yml b/ansible/roles/data-processing/vars/main.yml index 21dbd126c..88c7dfdcc 100644 --- a/ansible/roles/data-processing/vars/main.yml +++ b/ansible/roles/data-processing/vars/main.yml @@ -1,2 +1,46 @@ --- -# vars file for data-processing \ No newline at end of file +# vars file for data loading +neo4j_password: "{{ lookup('env','NEO4J_PASSWORD') }}" +neo4j_user: neo4j +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ 
lookup('env','WORKSPACE') }}" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +s3_folder: "{{ lookup('env','S3_FOLDER') }}" +wipe_db: "{{ lookup('env','WIPE_DB') }}" +cheat_mode: "{{ lookup('env','CHEAT_MODE')}}" +split_transactions: "{{ lookup('env','SPLIT')}}" +project: "{{lookup('env','PROJECT')}}" +region: us-east-1 +#vars for match files +match_base_url: "{{ lookup('env','MATCH_BASE_URL') }}" +okta_url: https://bioappdev.okta.com/oauth2/{{okta_key}}/v1/token +bucket_Name: "{{ lookup('env','S3_BUCKET') }}" +arm_Id: "{{ lookup('env','ARM_ID') }}" +phs_Id: "{{ lookup('env','PHS_ID') }}" +okta_key: "{{ lookup('env','OKTA_KEY') }}" +secret_name: "{{ lookup('env','SECRET_NAME') }}" +meta_data_path: "{{ lookup('env','META_DATA_PATH') }}" +#meta_data_bucket: "{{ lookup('env','META_DATA_BUCKET') }}" +meta_data_bucket: sample-meta-data-bucket +use_prod: "{{ lookup('env','USE_PROD') }}" +cipher_key: "{{ lookup('env','CIPHER_KEY') }}" + +#vars for data validation +ctdc_base_url: "https://trialcommons-dev.cancer.gov" +s3_bucket: "{{ lookup('env','S3_BUCKET') }}" +manifest_file: "{{ lookup('env','MANIFEST_FILE') }}" + +data_bucket: "{{bucket[project]}}" +project: "{{ lookup('env','PROJECT') }}" +bucket: + bento: bento-metadata-dev + ccdi: ccdi-metadata-dev + ins: ins-metadata-dev + gmb: bento-gmb-metadata + ctdc: bento-ctdc-metadata + github-actions: bento-metadata-dev + c3dc: bento-c3dc-metadata + +# vars for elasticsearch loader +es_host: "{{ lookup('env','ES_HOST') }}" +frontend_branch: "{{ lookup('env','FRONTEND_BRANCH') }}" \ No newline at end of file diff --git a/ansible/roles/docker-as-service/.travis.yml b/ansible/roles/docker-as-service/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/docker-as-service/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip 
install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/docker-as-service/README.md b/ansible/roles/docker-as-service/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/docker-as-service/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/docker-as-service/defaults/main.yml b/ansible/roles/docker-as-service/defaults/main.yml new file mode 100644 index 000000000..a16b69dec --- /dev/null +++ b/ansible/roles/docker-as-service/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for docker-as-service \ No newline at end of file diff --git a/ansible/roles/docker-as-service/files/docker-container@app.service b/ansible/roles/docker-as-service/files/docker-container@app.service new file mode 100644 index 000000000..dc9924102 --- /dev/null +++ b/ansible/roles/docker-as-service/files/docker-container@app.service @@ -0,0 +1,15 @@ +[Unit] +Description=Docker Container %I +Requires=docker.service +After=cloud-final.service + +[Service] +Restart=always +ExecStartPre=-/usr/bin/docker rm -f %i +ExecStart=/usr/bin/docker run --name %i \ +--restart=on-failure:10 \ +amazon/amazon-ecs-agent:latest +ExecStop=/usr/bin/docker stop %i + +[Install] +WantedBy=default.target \ No newline at end of file diff --git a/ansible/roles/docker-as-service/handlers/main.yml b/ansible/roles/docker-as-service/handlers/main.yml new file mode 100644 index 000000000..0afb3b7a8 --- /dev/null +++ b/ansible/roles/docker-as-service/handlers/main.yml @@ -0,0 +1,5 @@ +--- +# handlers file for docker-as-service +- name: systemctl daemon-reload + systemd: + daemon_reload: true \ No newline at end of file diff --git a/ansible/roles/docker-as-service/meta/main.yml b/ansible/roles/docker-as-service/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/docker-as-service/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT 
+ # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/docker-as-service/tasks/compose.yml b/ansible/roles/docker-as-service/tasks/compose.yml new file mode 100644 index 000000000..94f0ad522 --- /dev/null +++ b/ansible/roles/docker-as-service/tasks/compose.yml @@ -0,0 +1,53 @@ +- name: Create /etc/docker/compose directory + file: + name: /etc/docker/compose + state: directory + +- name: Install docker-compose@.service + copy: + dest: /etc/systemd/system/docker-compose@.service + content: | + # This file is managed by Ansible. + # See https://gist.github.com/mosquito/b23e1c1e5723a7fd9e6568e5cf91180f + # for source and motivations. 
+ [Unit] + Description=%i service with docker compose + Requires=docker.service + After=docker.service + + [Service] + WorkingDirectory=/etc/docker/compose/%i + + # Remove old containers, images and volumes + ExecStartPre=/usr/bin/docker-compose down -v + ExecStartPre=/usr/bin/docker-compose rm -fv + ExecStartPre=-/bin/bash -c 'docker volume ls -qf "name=%i_" | xargs docker volume rm' + ExecStartPre=-/bin/bash -c 'docker network ls -qf "name=%i_" | xargs docker network rm' + ExecStartPre=-/bin/bash -c 'docker ps -aqf "name=%i_*" | xargs docker rm' + + # Compose up + ExecStart=/usr/bin/docker-compose up + + # Compose down, remove containers and volumes + ExecStop=/usr/bin/docker-compose down -v + + [Install] + WantedBy=multi-user.target + notify: systemctl daemon-reload + +- name: Create the {{app_name}} configuration directory + file: + name: /etc/{{app_name}} + state: directory + +- name: Create {{ app_name}} docker-compose configuration directory + file: + name: /etc/docker/compose/{{app_name}} + state: link + src: /etc/{{app_name}} + +- name: Start and enable the {{app_name}} service + service: + name: docker-compose@{{app.app_name}} + state: started + enabled: true \ No newline at end of file diff --git a/ansible/roles/docker-as-service/tasks/container.yml b/ansible/roles/docker-as-service/tasks/container.yml new file mode 100644 index 000000000..d91e001ea --- /dev/null +++ b/ansible/roles/docker-as-service/tasks/container.yml @@ -0,0 +1,11 @@ + +- name: copy docker service to systemd directory + copy: + src: docker-container@app.service + dest: /etc/systemd/system/docker-container@{{app_name}}.service + +- name: enable and start docker-container@ecs-agent.service + service: + name: docker-container@{{app_name}}.service + state: started + enabled: yes \ No newline at end of file diff --git a/ansible/roles/docker-as-service/tasks/main.yml b/ansible/roles/docker-as-service/tasks/main.yml new file mode 100644 index 000000000..561d76174 --- /dev/null +++ 
b/ansible/roles/docker-as-service/tasks/main.yml @@ -0,0 +1,55 @@ +--- +# tasks file for docker-as-service +- name: Create /etc/docker/compose directory + file: + name: /etc/docker/compose + state: directory + +- name: Install docker-compose@.service + copy: + dest: /etc/systemd/system/docker-compose@.service + content: | + # This file is managed by Ansible. + # See https://gist.github.com/mosquito/b23e1c1e5723a7fd9e6568e5cf91180f + # for source and motivations. + [Unit] + Description=%i service with docker compose + Requires=docker.service + After=docker.service + + [Service] + WorkingDirectory=/etc/docker/compose/%i + + # Remove old containers, images and volumes + ExecStartPre=/usr/bin/docker-compose down -v + ExecStartPre=/usr/bin/docker-compose rm -fv + ExecStartPre=-/bin/bash -c 'docker volume ls -qf "name=%i_" | xargs docker volume rm' + ExecStartPre=-/bin/bash -c 'docker network ls -qf "name=%i_" | xargs docker network rm' + ExecStartPre=-/bin/bash -c 'docker ps -aqf "name=%i_*" | xargs docker rm' + + # Compose up + ExecStart=/usr/bin/docker-compose up + + # Compose down, remove containers and volumes + ExecStop=/usr/bin/docker-compose down -v + + [Install] + WantedBy=multi-user.target + notify: systemctl daemon-reload + +- name: Create the {{app_name}} configuration directory + file: + name: /etc/{{app_name}} + state: directory + +- name: Create {{app_name}} docker-compose configuration directory + file: + name: /etc/docker/compose/{{app_name}} + state: link + src: /etc/{{app_name}} + +- name: Start and enable the {{app_name}} service + service: + name: docker-compose@{{app_name}} + state: started + enabled: true \ No newline at end of file diff --git a/ansible/roles/docker-as-service/tests/inventory b/ansible/roles/docker-as-service/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/docker-as-service/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/docker-as-service/tests/test.yml 
b/ansible/roles/docker-as-service/tests/test.yml new file mode 100644 index 000000000..c9b6dc06c --- /dev/null +++ b/ansible/roles/docker-as-service/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - docker-as-service \ No newline at end of file diff --git a/ansible/roles/docker-as-service/vars/main.yml b/ansible/roles/docker-as-service/vars/main.yml new file mode 100644 index 000000000..9b7aa80b7 --- /dev/null +++ b/ansible/roles/docker-as-service/vars/main.yml @@ -0,0 +1,3 @@ +--- + +app_name: "{{app_name}}" \ No newline at end of file diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml index 85610a152..11684bcd3 100644 --- a/ansible/roles/docker/handlers/main.yml +++ b/ansible/roles/docker/handlers/main.yml @@ -1,2 +1,6 @@ --- -# handlers file for docker \ No newline at end of file +# handlers file for docker +- name: restart docker + service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml index a97abf49f..721eda724 100644 --- a/ansible/roles/docker/tasks/main.yml +++ b/ansible/roles/docker/tasks/main.yml @@ -1,18 +1,23 @@ --- -- name: Remove other Docker versions - yum: - name: - - docker - - docker-client - - docker-client-latest - - docker-common - - docker-latest - - docker-latest-logrotate - - docker-logrotate - - docker-engine - - docker-compose - state: absent +# - name: Remove other Docker versions +# yum: +# name: +# - docker +# - docker-client +# - docker-client-latest +# - docker-common +# - docker-latest +# - docker-latest-logrotate +# - docker-logrotate +# - docker-engine +# - docker-compose +# state: absent + # tasks file for docker +- name: install epel-release + yum: + name: + - epel-release - name: install systems packages needed for docker yum: name: @@ -21,26 +26,43 @@ - lvm2 - python-setuptools - firewalld - - http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - state: 
latest + - python-pip + - docker-compose + state: installed +- name: install docker python module + pip: + name: + # - docker + - docker-py + + + - name: enable and start firewalld service: name: firewalld state: started enabled: yes + tags: + - master + +- name: open tcp port 2375 and 2376 + firewalld: + state: enabled + permanent: yes + port: "{{item}}/tcp" + immediate: yes + zone: public + loop: + - 2375 + - 2376 + tags: + - master # - name: enable extra repos when running on red hat # command: subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" # when: ansible_distribution == 'Red Hat Enterprise Linux' -- name: install pip and docker-compose - command: "{{ item }}" - with_items: - - "easy_install pip" - - "pip install --upgrade --force-reinstall pip==9.0.3" - - "pip install docker-compose" - - name: add docker repo command: > yum-config-manager --add-repo @@ -49,7 +71,7 @@ - name: install docker yum: name: ['docker-ce', 'docker-ce-cli', 'containerd.io'] - state: latest + state: installed - name: enable and start docker service: @@ -57,8 +79,25 @@ enabled: yes state: restarted +- name: create docker systemd options directory + file: + path: /etc/systemd/system/docker.service.d + state: directory + tags: + - master + +- name: configure docker startup options + template: + src: startup-options.conf.j2 + dest: /etc/systemd/system/docker.service.d/startup_options.conf + notify: + - restart docker + tags: + - master + - name: reload systemctl daemon - command: systemctl daemon-reload + systemd: + daemon_reload: yes diff --git a/ansible/roles/docker/templates/startup-options.conf.j2 b/ansible/roles/docker/templates/startup-options.conf.j2 new file mode 100644 index 000000000..afa83a0aa --- /dev/null +++ b/ansible/roles/docker/templates/startup-options.conf.j2 @@ -0,0 +1,3 @@ +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375 \ No newline at end of file diff --git a/ansible/roles/ecs-agent/README.md 
b/ansible/roles/ecs-agent/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/ecs-agent/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/ecs-agent/defaults/main.yml b/ansible/roles/ecs-agent/defaults/main.yml new file mode 100644 index 000000000..3bc56b749 --- /dev/null +++ b/ansible/roles/ecs-agent/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ecs-agent \ No newline at end of file diff --git a/ansible/roles/ecs-agent/files/docker-container@ecs-agent.service b/ansible/roles/ecs-agent/files/docker-container@ecs-agent.service new file mode 100644 index 000000000..733f682bf --- /dev/null +++ b/ansible/roles/ecs-agent/files/docker-container@ecs-agent.service @@ -0,0 +1,22 @@ +[Unit] +Description=Docker Container %I +Requires=docker.service +After=cloud-final.service + +[Service] +Restart=always +ExecStartPre=-/usr/bin/docker rm -f %i +ExecStart=/usr/bin/docker run --name %i \ +--privileged \ +--restart=on-failure:10 \ +--volume=/var/run:/var/run \ +--volume=/var/log/ecs/:/log:Z \ +--volume=/var/lib/ecs/data:/data:Z \ +--volume=/etc/ecs:/etc/ecs \ +--net=host \ +--env-file=/etc/ecs/ecs.config \ +amazon/amazon-ecs-agent:latest +ExecStop=/usr/bin/docker stop %i + +[Install] +WantedBy=default.target \ No newline at end of file diff --git a/ansible/roles/ecs-agent/handlers/main.yml b/ansible/roles/ecs-agent/handlers/main.yml new file mode 100644 index 000000000..502c66361 --- /dev/null +++ b/ansible/roles/ecs-agent/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ecs-agent \ No newline at end of file diff --git a/ansible/roles/ecs-agent/meta/main.yml b/ansible/roles/ecs-agent/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/ansible/roles/ecs-agent/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - 
BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/ecs-agent/tasks/main.yml b/ansible/roles/ecs-agent/tasks/main.yml new file mode 100644 index 000000000..6dad87ed3 --- /dev/null +++ b/ansible/roles/ecs-agent/tasks/main.yml @@ -0,0 +1,106 @@ +--- +# tasks file for ecs-agent +- name: gather instance facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Name": "{{stack_name}}-{{env}}-frontend" + "instance-state-name": running + "tag:Environment": "{{env}}" + register: frontend +# when: stack_name != "OpenPedCan" + +#- name: gather instance facts +# ec2_instance_facts: +# region: us-east-1 +# filters: +# "tag:Name": "{{stack_name}}-{{env}}-httpserver" +# "instance-state-name": running +# register: frontend +# when: stack_name == "OpenPedCan" + +- debug: + msg: "{{ frontend }}" + +- name: set instance name + set_fact: + frontend_ip: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +#set local routing +- name: set localhost routing + sysctl: + name: net.ipv4.conf.all.route_localnet + value: '1' + sysctl_set: yes + state: present + reload: yes + +- name: install iptables + yum: + name: + - iptables-services + state: present + +- name: start iptables service + service: + name: iptables + state: started + enabled: yes + +- name: configure ecs-agent routing + iptables: + table: nat + chain: PREROUTING + protocol: tcp + destination: 169.254.170.2 + destination_port: '80' + jump: DNAT + to_destination: 127.0.0.1:51679 + comment: configure nat + +- name: configure ecs-agent redirect + iptables: + table: nat + chain: OUTPUT + protocol: tcp + match: tcp + destination: 169.254.170.2 + destination_port: '80' + jump: REDIRECT + to_ports: '51679' + comment: Redirect web traffic to port 51679 + +- name: save iptables + command: service iptables save + args: + warn: false + +- name: reload iptables + command: service iptables reload + args: + warn: false + +- name: create ecs directory + file: + path: "{{item}}" + state: directory + loop: + - 
"/etc/ecs" + - "/var/log/ecs" + - "/var/lib/ecs/data" + +- name: copy file ecs.config to /etc/ecs/ecs.config + template: + src: ecs.config.j2 + dest: /etc/ecs/ecs.config + +- name: copy docker service to systemd directory + copy: + src: docker-container@ecs-agent.service + dest: /etc/systemd/system/docker-container@ecs-agent.service + +- name: enable and start docker-container@ecs-agent.service + service: + name: docker-container@ecs-agent.service + state: started + enabled: yes \ No newline at end of file diff --git a/ansible/roles/ecs-agent/templates/ecs.config.j2 b/ansible/roles/ecs-agent/templates/ecs.config.j2 new file mode 100644 index 000000000..d14ec15d1 --- /dev/null +++ b/ansible/roles/ecs-agent/templates/ecs.config.j2 @@ -0,0 +1,12 @@ +ECS_DATADIR=/data +ECS_ENABLE_TASK_IAM_ROLE=true +ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true +ECS_LOGFILE=/log/ecs-agent.log +ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs","syslog"] +ECS_LOGLEVEL=info +ECS_CLUSTER={{ecs_cluster_name}} +{% if ansible_default_ipv4.address == frontend_ip %} +ECS_INSTANCE_ATTRIBUTES={"role": "frontend"} +{% else %} +ECS_INSTANCE_ATTRIBUTES={"role": "backend"} +{% endif %} diff --git a/ansible/roles/ecs-agent/tests/inventory b/ansible/roles/ecs-agent/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/ecs-agent/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/ecs-agent/tests/test.yml b/ansible/roles/ecs-agent/tests/test.yml new file mode 100644 index 000000000..bd797d6fb --- /dev/null +++ b/ansible/roles/ecs-agent/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ecs-agent \ No newline at end of file diff --git a/ansible/roles/ecs-agent/vars/main.yml b/ansible/roles/ecs-agent/vars/main.yml new file mode 100644 index 000000000..c73e942d3 --- /dev/null +++ b/ansible/roles/ecs-agent/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ecs-agent \ No newline at end of file diff --git 
a/ansible/roles/git-copy/README.md b/ansible/roles/git-copy/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/git-copy/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/git-copy/defaults/main.yml b/ansible/roles/git-copy/defaults/main.yml new file mode 100644 index 000000000..58ebccb15 --- /dev/null +++ b/ansible/roles/git-copy/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for roles/git-tag +project: icdc \ No newline at end of file diff --git a/ansible/roles/git-copy/handlers/main.yml b/ansible/roles/git-copy/handlers/main.yml new file mode 100644 index 000000000..efa3d9db0 --- /dev/null +++ b/ansible/roles/git-copy/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for roles/git-tag \ No newline at end of file diff --git a/ansible/roles/git-copy/meta/main.yml b/ansible/roles/git-copy/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/ansible/roles/git-copy/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/git-copy/tasks/main.yml b/ansible/roles/git-copy/tasks/main.yml new file mode 100644 index 000000000..8b33e4e80 --- /dev/null +++ b/ansible/roles/git-copy/tasks/main.yml @@ -0,0 +1,51 @@ +--- +- name: clean up local folders + file: + path: "{{ git_home }}" + state: absent + +- name: set icdc git_url + set_fact: + git_url: "{{git_base}}/bento-icdc-frontend" + when: project == "icdc" + +- name: set fact + set_fact: + git_url: "{{git_base}}/bento-frontend" + when: project == "bento" + +- name: checkout common codebase + git: + repo: "{{git_url}}" + dest: "{{ git_home }}" + update: yes + version: master + +- name: copy content {{copy_from}} to {{copy_to}} + command: rsync -rcCivh {{copy_from}}/ {{copy_to}}/ --delete + args: + chdir: "{{ git_home}}/src/content" + register: git_copy + +- debug: + msg: "{{git_copy.stdout_lines}}" + +- name: git add contents + command: git add -A . 
+ args: + chdir: "{{ git_home }}" + warn: false + +- name: commit changes + command: git commit -m "copy content from {{copy_from}} to {{copy_to}} on {{git_date}}" --allow-empty + args: + chdir: "{{ git_home }}" + warn: false + +- name: push changes + command: git push + args: + chdir: "{{ git_home }}" + warn: false + + diff --git a/ansible/roles/git-copy/tests/inventory b/ansible/roles/git-copy/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/git-copy/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/git-copy/tests/test.yml b/ansible/roles/git-copy/tests/test.yml new file mode 100644 index 000000000..d01d1a912 --- /dev/null +++ b/ansible/roles/git-copy/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - roles/git-copy \ No newline at end of file diff --git a/ansible/roles/git-copy/vars/main.yml b/ansible/roles/git-copy/vars/main.yml new file mode 100644 index 000000000..f6f51d167 --- /dev/null +++ b/ansible/roles/git-copy/vars/main.yml @@ -0,0 +1,10 @@ +--- +# vars file for roles/git-copy +git_date: "{{ lookup('pipe','date +%Y%m%d%H%M') }}" +username: "{{ lookup('env','GIT_USERNAME') }}" +password: "{{ lookup('env','GIT_PASSWORD') }}" +git_token: "{{ lookup('env','git_token') }}" +copy_from: pre-prod +copy_to: prod +git_home: /tmp/work +git_base: https://{{git_token}}:x-oauth-basic@github.com/CBIIT diff --git a/ansible/roles/git-tag/README.md b/ansible/roles/git-tag/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/git-tag/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/git-tag/defaults/main.yml b/ansible/roles/git-tag/defaults/main.yml new file mode 100644 index 000000000..bb2204d4f --- /dev/null +++ b/ansible/roles/git-tag/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for roles/git-tag \ No newline at end of file diff --git a/ansible/roles/git-tag/handlers/main.yml b/ansible/roles/git-tag/handlers/main.yml new file mode 100644 index 000000000..efa3d9db0 --- /dev/null +++ b/ansible/roles/git-tag/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for roles/git-tag \ No newline at end of file diff --git a/ansible/roles/git-tag/meta/main.yml b/ansible/roles/git-tag/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/ansible/roles/git-tag/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/git-tag/tasks/main.yml b/ansible/roles/git-tag/tasks/main.yml new file mode 100644 index 000000000..6ae81d6ec --- /dev/null +++ b/ansible/roles/git-tag/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: clean up local folders + file: + path: "{{ git_home }}" + state: absent + +- name: checkout commons codebase + git: + repo: "{{git_url}}" + dest: "{{ git_home }}" + update: yes + version: master + +- name: tag the repo + command: git tag "{{git_tag}}" + args: + chdir: "{{ git_home }}" + warn: false + +- name: push the new tag + command: git push --tags + args: + chdir: "{{ git_home }}" + warn: false + +- name: count the current tags + shell: git tag + args: + chdir: "{{ git_home }}" + warn: false + register: tag_list + +- name: list git daily tags + set_fact: + tags: "{{ tag_list.stdout_lines | map('regex_search',date_regex) | select('string') | list }}" + +- name: tags to be deleted + set_fact: + tags_to_remove: "{{ tags[:-retention] }}" + +- name: delete excess tags remote + command: git push --delete origin {{ item }} + args: + chdir: "{{ git_home }}" + warn: false + loop: "{{ tags_to_remove }}" + +- name: delete excess tags local + command: git tag --delete {{ item }} + args: + chdir: "{{ git_home }}" + warn: false + loop: "{{ tags_to_remove }}" diff --git a/ansible/roles/git-tag/tests/inventory b/ansible/roles/git-tag/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/git-tag/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git 
a/ansible/roles/git-tag/tests/test.yml b/ansible/roles/git-tag/tests/test.yml new file mode 100644 index 000000000..d01d1a912 --- /dev/null +++ b/ansible/roles/git-tag/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - roles/git-tag \ No newline at end of file diff --git a/ansible/roles/git-tag/vars/main.yml b/ansible/roles/git-tag/vars/main.yml new file mode 100644 index 000000000..d698fcb57 --- /dev/null +++ b/ansible/roles/git-tag/vars/main.yml @@ -0,0 +1,3 @@ +--- +# vars file for roles/git-tag +#git_tag: "daily_tag.{{ lookup('pipe','date +%Y%m%d%H%M') }}" diff --git a/ansible/roles/github-actions-runner/.travis.yml b/ansible/roles/github-actions-runner/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/github-actions-runner/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/github-actions-runner/README.md b/ansible/roles/github-actions-runner/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/github-actions-runner/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/github-actions-runner/defaults/main.yml b/ansible/roles/github-actions-runner/defaults/main.yml new file mode 100644 index 000000000..032dba938 --- /dev/null +++ b/ansible/roles/github-actions-runner/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for roles/github-actions-runner diff --git a/ansible/roles/github-actions-runner/handlers/main.yml b/ansible/roles/github-actions-runner/handlers/main.yml new file mode 100644 index 000000000..488f519a0 --- /dev/null +++ b/ansible/roles/github-actions-runner/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for roles/github-actions-runner diff --git a/ansible/roles/github-actions-runner/meta/main.yml b/ansible/roles/github-actions-runner/meta/main.yml new file mode 100644 index 000000000..c572acc9f --- /dev/null +++ b/ansible/roles/github-actions-runner/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/roles/github-actions-runner/tasks/main.yml b/ansible/roles/github-actions-runner/tasks/main.yml new file mode 100644 index 000000000..bcd34be94 --- /dev/null +++ b/ansible/roles/github-actions-runner/tasks/main.yml @@ -0,0 +1,56 @@ +--- +# tasks file for roles/github-actions-runner + +- name: install docker + yum: + name: + - docker + - git + - jq + state: present + +- name: create runner directory + file: + path: /runner + state: directory + +- name: download runner binary + get_url: + url: https://github.com/actions/runner/releases/download/v{{runner_version}}/actions-runner-linux-x64-{{runner_version}}.tar.gz + dest: /runner/actions-runner-linux-x64-{{runner_version}}.tar.gz + +- name: extract the runner file + unarchive: + src: /runner/actions-runner-linux-x64-{{runner_version}}.tar.gz + dest: /runner + remote_src: yes + +# - name: get github token +# uri: +# url: https://api.github.com/repos/CBIIT/bento-github-actions-poc/actions/runners/registration-token +# method: POST +# headers: +# authorization: "{{pat}}" +# register: login + +- name: install runner script + shell: ./config.sh --url https://github.com/CBIIT/bento-github-actions-poc --token {{pat}} --name "bento-runner-0" + args: 
+ chdir: /runner + environment: + RUNNER_ALLOW_RUNASROOT: true + +- name: install runner service + shell: ./svc.sh install && ./svc.sh start + args: + chdir: /runner + +- name: show login content + debug: + msg: "{{login}}" + +- name: enable and start docker + service: + name: docker + state: started + enabled: yes diff --git a/ansible/roles/github-actions-runner/tests/inventory b/ansible/roles/github-actions-runner/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/github-actions-runner/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/github-actions-runner/tests/test.yml b/ansible/roles/github-actions-runner/tests/test.yml new file mode 100644 index 000000000..9f05ce6cd --- /dev/null +++ b/ansible/roles/github-actions-runner/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - roles/github-actions-runner diff --git a/ansible/roles/github-actions-runner/vars/main.yml b/ansible/roles/github-actions-runner/vars/main.yml new file mode 100644 index 000000000..7de6d67bd --- /dev/null +++ b/ansible/roles/github-actions-runner/vars/main.yml @@ -0,0 +1,3 @@ +--- +# vars file for roles/github-actions-runner +runner_version: 2.283.3 diff --git a/ansible/roles/icdc-data-dictionary/.travis.yml b/ansible/roles/icdc-data-dictionary/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: 
https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/README.md b/ansible/roles/icdc-data-dictionary/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/icdc-data-dictionary/defaults/main.yml b/ansible/roles/icdc-data-dictionary/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/icdc-data-dictionary/files/inject.template.js b/ansible/roles/icdc-data-dictionary/files/inject.template.js new file mode 100644 index 000000000..431ba7769 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/files/inject.template.js @@ -0,0 +1,4 @@ +window.injectedEnv = { + REACT_APP_MODEL_URL: '${REACT_APP_MODEL_URL}', + REACT_APP_MODEL_PROPS_URL: '${REACT_APP_MODEL_PROPS_URL}', +}; \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/files/nginx-entrypoint.sh b/ansible/roles/icdc-data-dictionary/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/handlers/main.yml b/ansible/roles/icdc-data-dictionary/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/meta/main.yml b/ansible/roles/icdc-data-dictionary/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is 
not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/tasks/build.yml b/ansible/roles/icdc-data-dictionary/tasks/build.yml new file mode 100644 index 000000000..34f319b6a --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/tasks/build.yml @@ -0,0 +1,82 @@ +--- +################################################################################################# + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/data-dictionary-nginx.conf' + dest: '{{workspace}}/{{project}}/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/{{project}}/nginx-entrypoint.sh" + mode: 0755 + +- name: run npm install in {{workspace}}/{{project}} + command: "{{item}}" + args: + chdir: "{{workspace}}/{{project}}" + loop: + - npm install + - npm run build + + + +- name: rename build to dist + command: mv {{workspace}}/{{project}}/build {{workspace}}/{{project}}/dist + +- name: copy env to dist + copy: + src: inject.template.js + dest: "{{workspace}}/{{project}}/dist/inject.template.js" + mode: 0755 + + +- name: log into ncidockerhub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + registry: https://ncidockerhub.nci.nih.gov + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + + +- name: build cbiitssrepo/{{project | lower}} image + docker_image: + build: + path: "{{workspace}}/{{project}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/data-dictionary-dockerfile" + pull: yes + nocache: yes + name: ncidockerhub.nci.nih.gov/icdc/data-dictionary + tag: "{{dictionary_version}}" + push: yes + force_source: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: 
/local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + +- name: Add tag latest to cbiitssrepo/data-dictionary image + docker_image: + name: "ncidockerhub.nci.nih.gov/icdc/data-dictionary:{{dictionary_version}}" + repository: ncidockerhub.nci.nih.gov/icdc/data-dictionary:latest + force_tag: yes + push: yes + source: local + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + + \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/tasks/deploy.yml b/ansible/roles/icdc-data-dictionary/tasks/deploy.yml new file mode 100644 index 000000000..7004bd129 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/tasks/deploy.yml @@ -0,0 +1,46 @@ +--- +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + registry: https://ncidockerhub.nci.nih.gov + +- name: remove data directionary container + docker_container: + name: data-dictionary + state: absent + +- name: clean up + command: docker system prune -a -f + +- name: ensure log and docker directory exists + file: + path: /local/content/k9dc/data-dictionary + state: directory + +- name: launch the new data-dictionary + docker_container: + name: data-dictionary + image: ncidockerhub.nci.nih.gov/icdc/data-dictionary:{{dictionary_version}} + env: + REACT_APP_MODEL_URL: "{{react_app_model_url}}" + REACT_APP_MODEL_PROPS_URL: "{{react_app_model_props_url}}" + VERSION: "{{dictionary_version}}" + DATE: "{{ansible_date_time.date}}" + NEW_RELIC_APP_NAME: "{{stack_name}}-{{tier}}data-dictionary-{{ansible_hostname}}" + NEW_RELIC_LICENSE_KEY: "{{license_key}}" + restart_policy: always + ports: + - "81:81" + volumes: + - 
"/local/content/k9dc/data-dictionary:/var/log/nginx" + +# - name: "wait for data-dictionary service to become available" +# uri: +# url: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/api/files/ping{% else %}https://caninecommons-{{ tier }}.cancer.gov/api/files/ping{% endif %}" +# follow_redirects: none +# method: GET +# register: _result +# until: ('status' in _result) and (_result.status == 200) +# retries: 100 +# delay: 10 diff --git a/ansible/roles/icdc-data-dictionary/tasks/main.yml b/ansible/roles/icdc-data-dictionary/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/icdc-data-dictionary/templates/env.j2 b/ansible/roles/icdc-data-dictionary/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/templates/nginx-config.yml.j2 b/ansible/roles/icdc-data-dictionary/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/tests/inventory b/ansible/roles/icdc-data-dictionary/tests/inventory new file mode 100644 index 000000000..878877b07 --- 
/dev/null +++ b/ansible/roles/icdc-data-dictionary/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/icdc-data-dictionary/tests/test.yml b/ansible/roles/icdc-data-dictionary/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/ansible/roles/icdc-data-dictionary/vars/main.yml b/ansible/roles/icdc-data-dictionary/vars/main.yml new file mode 100644 index 000000000..ce4d18c5d --- /dev/null +++ b/ansible/roles/icdc-data-dictionary/vars/main.yml @@ -0,0 +1,19 @@ +--- +# vars file for cicd +docker_host: "{{ lookup('env','DOCKER_HOST') }}" +tls_hostname: "{{ lookup('env','TLS_HOSTNAME') }}" +indexd_url: "{{ indexd_url }}" +stack_name: icdc +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER') }}" +dictionary_version: "{{ lookup('env','DICTIONARY_VERSION') }}" +license_key: "{{ lookup('env','NEWRELIC_LIC_KEY') }}" +react_app_model_url: https://raw.githubusercontent.com/CBIIT/icdc-model-tool/master/model-desc/icdc-model.yml +react_app_model_props_url: https://raw.githubusercontent.com/CBIIT/icdc-model-tool/master/model-desc/icdc-model-props.yml +project: visualiser-standalone + + + diff --git a/ansible/roles/icdc-file-downloader/.travis.yml b/ansible/roles/icdc-file-downloader/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + 
+ # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/icdc-file-downloader/README.md b/ansible/roles/icdc-file-downloader/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/icdc-file-downloader/defaults/main.yml b/ansible/roles/icdc-file-downloader/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/icdc-file-downloader/files/inject.template.js b/ansible/roles/icdc-file-downloader/files/inject.template.js new file mode 100644 index 000000000..9c7915530 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/files/inject.template.js @@ -0,0 +1,11 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', +}; diff --git a/ansible/roles/icdc-file-downloader/files/nginx-entrypoint.sh b/ansible/roles/icdc-file-downloader/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/icdc-file-downloader/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/icdc-file-downloader/handlers/main.yml b/ansible/roles/icdc-file-downloader/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file 
diff --git a/ansible/roles/icdc-file-downloader/meta/main.yml b/ansible/roles/icdc-file-downloader/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/icdc-file-downloader/tasks/build.yml b/ansible/roles/icdc-file-downloader/tasks/build.yml new file mode 100644 index 000000000..4671f6798 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/tasks/build.yml @@ -0,0 +1,50 @@ +--- +################################################################################################# + +- name: log into ncidockerhub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + registry: https://ncidockerhub.nci.nih.gov + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + + +- name: build cbiitssrepo/icdc-filedownloader image + docker_image: + build: + path: "{{workspace}}/bento-files" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/filedownload-dockerfile" + pull: yes + nocache: yes + name: ncidockerhub.nci.nih.gov/icdc/icdc-filedownloader + tag: "{{downloader_version}}" + push: yes + force_source: yes + source: build + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + +- name: Add tag latest to cbiitssrepo/bento-filedownloader image + docker_image: + name: "ncidockerhub.nci.nih.gov/icdc/icdc-filedownloader:{{downloader_version}}" + repository: ncidockerhub.nci.nih.gov/icdc/icdc-filedownloader:latest + force_tag: yes + push: yes + source: local + tls: yes + ca_cert: /local/home/commonsdocker/.docker/ca.pem + client_cert: /local/home/commonsdocker/.docker/jenkinscert.pem + client_key: /local/home/commonsdocker/.docker/jenkinskey.pem + tls_hostname: "{{tls_hostname}}" + docker_host: "{{docker_host}}" + + \ No newline at end of 
file diff --git a/ansible/roles/icdc-file-downloader/tasks/deploy.yml b/ansible/roles/icdc-file-downloader/tasks/deploy.yml new file mode 100644 index 000000000..0a06bdbb0 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/tasks/deploy.yml @@ -0,0 +1,52 @@ +--- +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + registry: https://ncidockerhub.nci.nih.gov + +- name: remove frontend container + docker_container: + name: file-downloader + state: absent + +- name: clean up + command: docker system prune -a -f + +- name: ensure log and docker directory exists + file: + path: /local/content/k9dc/file-downloader + state: directory + +- name: launch the new file-downloader + docker_container: + name: file-downloader + image: ncidockerhub.nci.nih.gov/icdc/icdc-filedownloader:{{downloader_version}} + env: + CF_URL: "https://{{cloudfront_domain_name}}" + # URL_SRC: "indexd" + # INDEXD_URL: "{{indexd_url}}" + CF_PRIVATE_KEY: "{{cloudfront_private_key}}" + CF_KEY_PAIR_ID: "{{cloudfront_key_group_id}}" + URL_SRC: "{{url_src}}" + BACKEND_URL: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/v1/graphql/{% else %}https://caninecommons-{{ tier }}.cancer.gov/v1/graphql/{% endif %}" + VERSION: "{{downloader_version}}" + DATE: "{{ansible_date_time.date}}" + PROJECT: "ICDC" + NEW_RELIC_APP_NAME: "{{stack_name}}-{{tier}}-file-downloader-{{ansible_hostname}}" + NEW_RELIC_LICENSE_KEY: "{{license_key}}" + restart_policy: always + ports: + - "8081:8081" + volumes: + - "/local/content/k9dc/file-downloader:/var/log" + +- name: "wait for file-downloader service to become available" + uri: + url: "{% if tier == 'prod' %}https://caninecommons.cancer.gov/api/files/ping{% else %}https://caninecommons-{{ tier }}.cancer.gov/api/files/ping{% endif %}" + follow_redirects: none + method: GET + register: _result + until: ('status' in _result) and (_result.status == 200) + retries: 100 + delay: 10 diff --git 
a/ansible/roles/icdc-file-downloader/tasks/main.yml b/ansible/roles/icdc-file-downloader/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/icdc-file-downloader/templates/env.j2 b/ansible/roles/icdc-file-downloader/templates/env.j2 new file mode 100644 index 000000000..17c24f011 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/templates/env.j2 @@ -0,0 +1,9 @@ +{% if tier == "prod" %} +REACT_APP_BACKEND_API=https://api.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% else %} +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml +{% endif %} \ No newline at end of file diff --git a/ansible/roles/icdc-file-downloader/templates/nginx-config.yml.j2 b/ansible/roles/icdc-file-downloader/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..848a0dfb3 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/templates/nginx-config.yml.j2 @@ -0,0 +1,5 @@ +integrations: + - name: nri-nginx + env: + REMOTE_MONITORING: true + METRICS: 1 \ No newline at end of file diff --git a/ansible/roles/icdc-file-downloader/tests/inventory b/ansible/roles/icdc-file-downloader/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/icdc-file-downloader/tests/test.yml b/ansible/roles/icdc-file-downloader/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/icdc-file-downloader/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git 
a/ansible/roles/icdc-file-downloader/vars/main.yml b/ansible/roles/icdc-file-downloader/vars/main.yml new file mode 100644 index 000000000..254953189 --- /dev/null +++ b/ansible/roles/icdc-file-downloader/vars/main.yml @@ -0,0 +1,21 @@ +--- +# vars file for cicd +docker_host: "{{ lookup('env','DOCKER_HOST') }}" +tls_hostname: "{{ lookup('env','TLS_HOSTNAME') }}" +indexd_url: "{{ indexd_url }}" +url_src: CLOUD_FRONT +stack_name: icdc +tier: "{{ lookup('env','TIER') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER') }}" +downloader_version: "{{ lookup('env','DOWNLOADER_VERSION') }}" +license_key: "{{ lookup('env','NEWRELIC_LIC_KEY') }}" +#cloudfront variables +cloudfront_origin_id: icdc_files_origin_id +cloudfront_key_group_name: icdc-{{tier}}-key-group +cloudfront_key_group_id: "{{cloudfront_key_group_id}}" +cloudfront_private_key: "{{ lookup('file', workspace + '/icdc-devops/ansible/cloudfront_private_key') }}" +cloudfront_domain_name: "{{ cloudfront_domain_name }}" + diff --git a/ansible/roles/icdc-pipeline/.travis.yml b/ansible/roles/icdc-pipeline/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/icdc-pipeline/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/icdc-pipeline/README.md 
b/ansible/roles/icdc-pipeline/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/icdc-pipeline/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/icdc-pipeline/defaults/main.yml b/ansible/roles/icdc-pipeline/defaults/main.yml new file mode 100644 index 000000000..fc5714050 --- /dev/null +++ b/ansible/roles/icdc-pipeline/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for cicd diff --git a/ansible/roles/icdc-pipeline/handlers/main.yml b/ansible/roles/icdc-pipeline/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/ansible/roles/icdc-pipeline/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/ansible/roles/icdc-pipeline/meta/main.yml b/ansible/roles/icdc-pipeline/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/icdc-pipeline/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/icdc-pipeline/tasks/main.yml b/ansible/roles/icdc-pipeline/tasks/main.yml new file mode 100644 index 000000000..092383bd7 --- /dev/null +++ b/ansible/roles/icdc-pipeline/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: run npm install in {{workspace}}/src/main/frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/" + loop: + - npm install + - npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + +- name: run npm install and build in {{workspace}}/src/main/frontend/node_modules/mui-custom-datatables + command: "{{item}}" + args: + chdir: "{{workspace}}/src/main/frontend/node_modules/mui-custom-datatables" + loop: + - npm install + - npm run build + +- name: run npm build in frontend + command: npm run-script build + args: + chdir: "{{workspace}}/src/main/frontend" + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/src/main/resources/application_example.properties" + state: absent + +- name: copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/src/main/resources/application.properties" + +- name: build springboot code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy ICDC-0.0.1.war to ROOT.war + copy: + remote_src: yes + src: "{{workspace}}/target/ICDC-0.0.1.war" + dest: "{{workspace}}/target/ROOT.war" + +# - name: remove ICDC-0.0.1.war file +# file: +# path: 
"{{workspace}}/target/ICDC-0.0.1.war" +# state: absent + + + diff --git a/ansible/roles/icdc-pipeline/templates/application.properties.j2 b/ansible/roles/icdc-pipeline/templates/application.properties.j2 new file mode 100644 index 000000000..d75fa1487 --- /dev/null +++ b/ansible/roles/icdc-pipeline/templates/application.properties.j2 @@ -0,0 +1,26 @@ +spring.mvc.throw-exception-if-no-handler-found=true +spring.data.neo4j.username={{neo4j_user}} +spring.data.neo4j.password={{neo4j_password}} +neo4j.jdbc.server=jdbc:neo4j:bolt://{{neo4j_ip}} +graphql.schema=graphql/person.graphqls, graphql/icdc.graphqls +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.java.driver.server=bolt://{{neo4j_ip}}:7687 +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp +error.redirect_url=http://localhost/error.html +api.version=v1 +session.timeout=30 +data.model.version = 1 +allow_grapqh_query = true +allow_graphql_mutation =false +fence.client_id={{fence_id}} +fence.client_credential={{fence_credential}} +fence.redirect_url=https://{{fence_url}}/ +fence.url=https://nci-crdc-staging.datacommons.io/ +fence.exchange_token_url=https://nci-crdc-staging.datacommons.io/user/oauth2/token +fence.log_out_url = https://nci-crdc-staging.datacommons.io/user/logout +neo4j_query.getversion= query { numberOfStudies }; +graphql_api.version = 1.0.0 +rest_api.version =1.0.0 +front_end.version =1.0.0 \ No newline at end of file diff --git a/ansible/roles/icdc-pipeline/tests/inventory b/ansible/roles/icdc-pipeline/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/icdc-pipeline/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/icdc-pipeline/tests/test.yml b/ansible/roles/icdc-pipeline/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/ansible/roles/icdc-pipeline/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ 
No newline at end of file diff --git a/ansible/roles/icdc-pipeline/vars/main.yml b/ansible/roles/icdc-pipeline/vars/main.yml new file mode 100644 index 000000000..388dbe3f8 --- /dev/null +++ b/ansible/roles/icdc-pipeline/vars/main.yml @@ -0,0 +1,10 @@ +--- +# vars file for cicd +workspace: "{{ lookup('env','WORKSPACE') }}" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +bearer: "{{ lookup('env','BEARER') }}" +neo4j_password: "{{ lookup('env','NEO4J_PASSWORD') }}" +neo4j_user: "{{ lookup('env','NEO4J_USER') }}" +fence_id: "{{ lookup('env','FENCE_ID') }}" +fence_credential: "{{ lookup('env','FENCE_CREDENTIAL') }}" +fence_url: "{{ lookup('env','FENCE_URL') }}" diff --git a/ansible/roles/icdc/README.md b/ansible/roles/icdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/icdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/icdc/defaults/main.yml b/ansible/roles/icdc/defaults/main.yml new file mode 100644 index 000000000..406cde47f --- /dev/null +++ b/ansible/roles/icdc/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for k9dc diff --git a/ansible/roles/icdc/handlers/main.yml b/ansible/roles/icdc/handlers/main.yml new file mode 100644 index 000000000..31630a940 --- /dev/null +++ b/ansible/roles/icdc/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for k9dc \ No newline at end of file diff --git a/ansible/roles/icdc/meta/main.yml b/ansible/roles/icdc/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/icdc/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. 
If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/k9dc/tasks/main.yml b/ansible/roles/icdc/tasks/main.yml similarity index 55% rename from ansible/roles/k9dc/tasks/main.yml rename to ansible/roles/icdc/tasks/main.yml index d7efa0bf7..4c8e11fc0 100644 --- a/ansible/roles/k9dc/tasks/main.yml +++ b/ansible/roles/icdc/tasks/main.yml @@ -1,68 +1,56 @@ --- -# tasks file for k9dc -- name: open http and https services +- name: open port 8080 firewalld: - service: "{{item}}" + port: 8080/tcp zone: public immediate: yes permanent: yes state: enabled - loop: - - http - - https - + - name: create tomcat group group: name: tomcat - gid: 46 + gid: 3001 state: present - name: create tomcat user user: name: tomcat - uid: 46 - group: tomcat + uid: 3001 + groups: tomcat,docker + append: yes -- name: create k9dc volume/directory +- name: create k9dc deployments directory file: path: "{{ item }}" state: directory owner: tomcat group: tomcat loop: - - "{{ k9dc_home }}" + - "{{ deployments }}" - "{{ docker_home }}" - "{{ k9dc_home }}/logs" - - "{{ deployments }}" - -- name: create k9dc deployments directory - file: - path: "{{ deployments }}" - state: directory - owner: jenkins - group: jenkins - - + - name: copy docker files template: src: "{{ item.src }}" dest: "{{ item.dest }}" owner: tomcat group: tomcat - with_items: + loop: - {src: 'docker-compose.yml.j2',dest: '{{docker_home}}/docker-compose.yml'} +- name: start k9dc + command: docker-compose up -d + args: + chdir: "{{ docker_home }}" + warn: no + tags: + - cloudone + - name: start the k9dc docker_compose: project_src: "{{ docker_home }}" state: present - -- name: download initial master application - aws_s3: - bucket: "{{bucket_name}}" - object: "{{war_file}}" - dest: /tmp/{{war_file}} - mode: get - -- name: deploy master application - command: docker cp /tmp/{{war_file}} {{container_name}}:/usr/local/tomcat/webapps + tags: + - sandbox \ No newline at end of file diff --git 
a/ansible/roles/icdc/templates/docker-compose.yml.j2 b/ansible/roles/icdc/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..ada4294fa --- /dev/null +++ b/ansible/roles/icdc/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + match: + container_name: k9dc + image: cbiitssrepo/tomcat9 + ports: + - 8080:8080 + volumes: + - {{ k9dc_home }}/logs:/usr/local/tomcat/logs + restart: always + environment: + - NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} + - NEW_RELIC_APP_NAME={{ app_name }} + - NEW_RELIC_HOST=gov-collector.newrelic.com + diff --git a/ansible/roles/icdc/tests/inventory b/ansible/roles/icdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/icdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/icdc/tests/test.yml b/ansible/roles/icdc/tests/test.yml new file mode 100644 index 000000000..bd61c2678 --- /dev/null +++ b/ansible/roles/icdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - k9dc \ No newline at end of file diff --git a/ansible/roles/icdc/vars/main.yml b/ansible/roles/icdc/vars/main.yml new file mode 100644 index 000000000..d532df83d --- /dev/null +++ b/ansible/roles/icdc/vars/main.yml @@ -0,0 +1,15 @@ +--- +# vars file for k9dc +docker_home: /local/content/docker +k9dc_home: /local/content/k9dc +deployments: /local/content/canine-data +container_name: k9dc +app_name: "{{env}}-icdc" +collector_name: "{{ env }}-k9dc" +newrelic_license_key: "{{ newrelic_key }}" +additional_logs: + - name: "{{ env }} k9dc Logs" + description: "{{ env }} k9dc logs" + category: "{{env }}/app/k9dc" + path: "/local/content/k9dc/logs/*.log" + filters: "" \ No newline at end of file diff --git a/ansible/roles/icdc_demo/README.md b/ansible/roles/icdc_demo/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/icdc_demo/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief 
description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/icdc_demo/defaults/main.yml b/ansible/roles/icdc_demo/defaults/main.yml new file mode 100644 index 000000000..406cde47f --- /dev/null +++ b/ansible/roles/icdc_demo/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for k9dc diff --git a/ansible/roles/icdc_demo/handlers/main.yml b/ansible/roles/icdc_demo/handlers/main.yml new file mode 100644 index 000000000..31630a940 --- /dev/null +++ b/ansible/roles/icdc_demo/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for k9dc \ No newline at end of file diff --git a/ansible/roles/icdc_demo/meta/main.yml b/ansible/roles/icdc_demo/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/icdc_demo/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/icdc_demo/tasks/main.yml b/ansible/roles/icdc_demo/tasks/main.yml new file mode 100644 index 000000000..cae1cda9c --- /dev/null +++ b/ansible/roles/icdc_demo/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: open port 8080 + firewalld: + port: 8080/tcp + zone: public + immediate: yes + permanent: yes + state: enabled + +- name: create tomcat group + group: + name: tomcat + gid: 3001 + state: present + +- name: create tomcat user + user: + name: tomcat + uid: 3001 + groups: tomcat,docker + append: yes + +- name: create icdc_demo deployments directory + file: + path: "{{ item }}" + state: directory + owner: tomcat + group: tomcat + loop: + - "{{ deployments }}" + - "{{ docker_home }}" + - "{{ icdc_demo_home }}/logs" + +- name: copy docker files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: tomcat + group: tomcat + loop: + - {src: 'docker-compose.yml.j2',dest: '{{docker_home}}/docker-compose.yml'} + +- name: start icdc_demo + command: docker-compose up -d + args: + chdir: "{{ docker_home }}" + warn: no + tags: + - cloudone + +- name: start the icdc_demo + docker_compose: + project_src: "{{ docker_home }}" + state: present + tags: + - 
sandbox \ No newline at end of file diff --git a/ansible/roles/icdc_demo/templates/docker-compose.yml.j2 b/ansible/roles/icdc_demo/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..2e653a680 --- /dev/null +++ b/ansible/roles/icdc_demo/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + match: + container_name: icdc_demo + image: cbiitssrepo/tomcat9 + ports: + - 8080:8080 + volumes: + - {{ icdc_demo_home }}/logs:/usr/local/tomcat/logs + restart: always + environment: + - NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} + - NEW_RELIC_APP_NAME={{ app_name }} + - NEW_RELIC_HOST=gov-collector.newrelic.com + diff --git a/ansible/roles/icdc_demo/tests/inventory b/ansible/roles/icdc_demo/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/icdc_demo/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/icdc_demo/tests/test.yml b/ansible/roles/icdc_demo/tests/test.yml new file mode 100644 index 000000000..bd61c2678 --- /dev/null +++ b/ansible/roles/icdc_demo/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - k9dc \ No newline at end of file diff --git a/ansible/roles/icdc_demo/vars/main.yml b/ansible/roles/icdc_demo/vars/main.yml new file mode 100644 index 000000000..41942aff1 --- /dev/null +++ b/ansible/roles/icdc_demo/vars/main.yml @@ -0,0 +1,15 @@ +--- +# vars file for icdc_demo +docker_home: /local/content/docker +icdc_demo_home: /local/content/icdc_demo +deployments: /local/content/canine-data +container_name: icdc_demo +app_name: "{{env}}-icdc_demo" +collector_name: "{{ env }}-icdc_demo" +newrelic_license_key: "{{ newrelic_key }}" +additional_logs: + - name: "{{ env }} icdc_demo Logs" + description: "{{ env }} icdc_demo logs" + category: "{{env }}/app/icdc_demo" + path: "/local/content/icdc_demo/logs/*.log" + filters: "" \ No newline at end of file diff --git a/ansible/roles/jenkins/README.md 
b/ansible/roles/jenkins/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/jenkins/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/jenkins/defaults/main.yml b/ansible/roles/jenkins/defaults/main.yml new file mode 100644 index 000000000..e848bd6a2 --- /dev/null +++ b/ansible/roles/jenkins/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for jenkins \ No newline at end of file diff --git a/ansible/roles/jenkins/handlers/main.yml b/ansible/roles/jenkins/handlers/main.yml new file mode 100644 index 000000000..c71405373 --- /dev/null +++ b/ansible/roles/jenkins/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for jenkins +- name: restart docker + service: + name: docker + state: restarted \ No newline at end of file diff --git a/ansible/roles/jenkins/meta/main.yml b/ansible/roles/jenkins/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/jenkins/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/jenkins/tasks/main.yml b/ansible/roles/jenkins/tasks/main.yml new file mode 100644 index 000000000..94dcb9470 --- /dev/null +++ b/ansible/roles/jenkins/tasks/main.yml @@ -0,0 +1,153 @@ +--- +# tasks file for jenkins +- name: open http and https services + firewalld: + service: "{{item}}" + zone: public + immediate: yes + permanent: yes + state: enabled + loop: + - http + - https + +# - name: list current users +# shell: getent group | awk -F":" '{print $1}' +# register: current_users + +# - name: change centos uid if exist +# user: +# name: centos +# uid: 1001 +# when: '"centos" in current_users.stdout' + + +# - name: change centos gid if exist +# group: +# name: centos +# gid: 1001 +# when: '"centos" in current_users.stdout' + + +# - name: create jenkins group +# group: +# name: jenkins +# gid: 1001 +# state: present + +# - name: create jenkins user +# user: +# name: jenkins +# uid: 1001 +# group: jenkins + +- name: install openjdk11 for local slave + yum: + name: java-11-openjdk + state: latest + +- name: create file structure for the jenkins + file: + path: "{{ item 
}}" + state: directory + # group: jenkins + # owner: jenkins + group: centos + owner: centos + loop: + - "{{ jenkins_home }}/jk_secrets" + - "{{ docker_home }}" + +- name: copy docker files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + # group: jenkins + # owner: jenkins + group: centos + owner: centos + loop: + - {src: 'docker-compose.yml.j2',dest: '{{ docker_home }}/docker-compose.yml'} + - {src: 'dockerfile_jenkins.j2',dest: '{{docker_home}}/dockerfile_jenkins'} + - {src: 'plugins.txt.j2',dest: '{{docker_home}}/plugins.txt'} + - {src: 'jenkins.env.j2',dest: '{{docker_home}}/jenkins.env'} + + +- name: copy conf files + copy: + src: "{{jenkins_yaml}}" + dest: "{{jenkins_home}}/jenkins.yaml" + owner: centos + group: centos + +- name: copy server_sshkey files + copy: + src: "{{server_sshkey_file}}" + dest: "{{jenkins_home}}/jk_secrets/server_sshkey" + owner: centos + group: centos + +- name: add secret files + copy: + content: "{{docker_agent_ip}}" + dest: "{{jenkins_home}}/docker_agent_ip" + +- name: add secrets files + copy: + content: "{{ item.src }}" + dest: "{{jenkins_home}}/jk_secrets/{{ item.dest }}" + # group: jenkins + # owner: jenkins + group: centos + owner: centos + loop: + - {src: "{{docker_agent_ip}}",dest: "docker_agent_ip"} + - {src: "{{tomcat01_ip}}",dest: "tomcat01_ip"} + - {src: "{{tomcat02_ip}}",dest: "tomcat02_ip"} + - {src: "{{slack_url}}",dest: "slack_url"} + - {src: "{{jenkinsadmin_ps}}",dest: "jenkinsadmin_ps"} + - {src: "{{bearer_ps}}",dest: "bearer_ps"} + - {src: "{{vdonkor_ps}}",dest: "vdonkor_ps"} + - {src: "{{neo4j_ps}}",dest: "neo4j_ps"} + - {src: "{{sshkey}}" ,dest: "sshkey"} + +- name: clean sshkey file + shell: cat sshkey | awk '{$1=$1;print}' > tempssh && mv tempssh sshkey && chown centos:centos sshkey + args: + chdir: "{{jenkins_home}}/jk_secrets" + +- name: build the docker image + docker_image: + path: "{{ docker_home }}" + name: k9dc/jenkins + dockerfile: "{{ dockerfile }}" + state: present + + +- name: start 
the jenkins + docker_compose: + project_src: "{{ docker_home }}" + state: present + +# - name: Init a new swarm with default parameters +# docker_swarm: +# state: present + +# - name: check if secret is already created +# shell: docker secret ls -f name=jenkinsAdmin | wc -l +# register: output_secret + +# - debug: +# msg: "{{ output_secret.stdout }}" + +# - name: set docker secret user +# shell: echo {{ jenkinsAdmin }} | docker secret create jenkinsAdmin - +# when: output_secret.stdout == "1" +# register: output + +# - name: deploy jenkins +# command: docker stack deploy --compose-file {{ docker_compose_file }} jenkins +# register: output + +# - debug: +# msg: "{{output.stdout }}" diff --git a/ansible/roles/jenkins/templates/docker-compose.yml.j2 b/ansible/roles/jenkins/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..c11884896 --- /dev/null +++ b/ansible/roles/jenkins/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + jenkins: + image: k9dc/jenkins + ports: + - 80:8080 + - 5001:5000 + volumes: + - {{jenkins_home}}:/var/jenkins_home + env_file: + - ./jenkins.env + environment: + - CASC_JENKINS_CONFIG=/var/jenkins_home/jenkins.yaml + - SECRETS=/var/jenkins_home/jk_secrets/ + restart: always diff --git a/terraform/icdc/roles/setup-jenkins/templates/dockerfile_jenkins.j2 b/ansible/roles/jenkins/templates/dockerfile_jenkins.j2 similarity index 88% rename from terraform/icdc/roles/setup-jenkins/templates/dockerfile_jenkins.j2 rename to ansible/roles/jenkins/templates/dockerfile_jenkins.j2 index e8cc2d996..c2c4a07db 100644 --- a/terraform/icdc/roles/setup-jenkins/templates/dockerfile_jenkins.j2 +++ b/ansible/roles/jenkins/templates/dockerfile_jenkins.j2 @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.176.1 +FROM jenkins/jenkins:2.176.2 ARG JAVA_OPTS ENV JAVA_OPTS="-Djenkins.install.runSetupWizard=false ${JAVA_OPTS:-}" diff --git a/ansible/roles/jenkins/templates/jenkins.env.j2 b/ansible/roles/jenkins/templates/jenkins.env.j2 new 
file mode 100644 index 000000000..f5be7061a --- /dev/null +++ b/ansible/roles/jenkins/templates/jenkins.env.j2 @@ -0,0 +1,9 @@ +DOCKER_AGENT_IP={{ docker_agent_ip }} +TOMCAT01_IP={{ tomcat01_ip }} +TOMCAT02_IP={{ tomcat02_ip }} +NEO4J_IP={{ neo4j_ip }} +SLACK_URL={{ slack_url }} +jenkinsAdmin={{ jenkinsadmin_ps }} +bearer={{ bearer_ps }} +vdonkor={{ vdonkor_ps }} +neo4j={{ neo4j_ps }} \ No newline at end of file diff --git a/ansible/roles/jenkins/templates/plugins.txt.j2 b/ansible/roles/jenkins/templates/plugins.txt.j2 new file mode 100644 index 000000000..e81db515d --- /dev/null +++ b/ansible/roles/jenkins/templates/plugins.txt.j2 @@ -0,0 +1,157 @@ +ace-editor +analysis-core +analysis-model-api +ansible +ansicolor +ant +antisamy-markup-formatter +apache-httpcomponents-client-4-api +authentication-tokens +bitbucket +blueocean +blueocean-autofavorite +blueocean-bitbucket-pipeline +blueocean-commons +blueocean-config +blueocean-core-js +blueocean-dashboard +blueocean-display-url +blueocean-events +blueocean-git-pipeline +blueocean-github-pipeline +blueocean-i18n +blueocean-jira +blueocean-jwt +blueocean-personalization +blueocean-pipeline-api-impl +blueocean-pipeline-editor +blueocean-pipeline-scm-api +blueocean-rest +blueocean-rest-impl +blueocean-web +bouncycastle-api +branch-api +build-environment +build-timeout +build-with-parameters +cloudbees-bitbucket-branch-source +cloudbees-folder +command-launcher +conditional-buildstep +configuration-as-code +configuration-as-code-groovy +copyartifact +credentials +credentials-binding +cucumber-reports +cucumber-slack-notifier +cucumber-trends-report +deploy +description-setter +display-url-api +docker-commons +docker-java-api +docker-plugin +docker-workflow +durable-task +email-ext +envinject +envinject-api +extended-choice-parameter +extensible-choice-parameter +favorite +filesystem-list-parameter-plugin +git +git-client +git-parameter +git-server +github +github-api +github-branch-source +github-oauth 
+github-organization-folder +gradle +handlebars +handy-uri-templates-2-api +htmlpublisher +jackson2-api +javadoc +jdk-tool +jenkins-design-language +job-dsl +jquery +jquery-detached +jquery-ui +jsch +junit +ldap +list-git-branches-parameter +lockable-resources +mailer +mapdb-api +matrix-auth +matrix-combinations-parameter +matrix-project +maven-plugin +mercurial +momentjs +multiple-scms +pam-auth +Parameterized-Remote-Trigger +parameterized-scheduler +parameterized-trigger +pipeline-build-step +pipeline-github-lib +pipeline-graph-analysis +pipeline-input-step +pipeline-milestone-step +pipeline-model-api +pipeline-model-declarative-agent +pipeline-model-definition +pipeline-model-extensions +pipeline-rest-api +pipeline-stage-step +pipeline-stage-tags-metadata +pipeline-stage-view +plain-credentials +preSCMbuildstep +pretested-integration +pubsub-light +purge-job-history +rbenv +rebuild +resource-disposer +role-strategy +ruby-runtime +run-condition +scm-api +script-security +seed +slack +sse-gateway +ssh-credentials +ssh-slaves +structs +subversion +text-finder +timestamper +token-macro +trilead-api +uno-choice +variant +warnings +warnings-ng +windows-slaves +workflow-aggregator +workflow-api +workflow-basic-steps +workflow-cps +workflow-cps-global-lib +workflow-durable-task-step +workflow-job +workflow-multibranch +workflow-scm-step +workflow-step-api +workflow-support +ws-cleanup +xvfb \ No newline at end of file diff --git a/ansible/roles/jenkins/tests/inventory b/ansible/roles/jenkins/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/jenkins/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/jenkins/tests/test.yml b/ansible/roles/jenkins/tests/test.yml new file mode 100644 index 000000000..846c4ff17 --- /dev/null +++ b/ansible/roles/jenkins/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - jenkins \ No newline at end of file diff --git 
a/ansible/roles/jenkins/vars/main.yml b/ansible/roles/jenkins/vars/main.yml new file mode 100644 index 000000000..3d9261ce6 --- /dev/null +++ b/ansible/roles/jenkins/vars/main.yml @@ -0,0 +1,9 @@ +--- +# vars file for jenkins +collector_name: "{{env}}-jenkins" +docker_home: "/local/content/docker" +jenkins_home: "/local/content/jenkins" +jenkins_yaml: "./config/icdc-jenkins.yaml" +server_sshkey_file: "./config/icdc-devops.pem" +dockerfile: "{{ docker_home }}/dockerfile_jenkins" +docker_compose_file: "{{ docker_home }}/docker-compose.yml" diff --git a/ansible/roles/k9dc/defaults/main.yml b/ansible/roles/k9dc/defaults/main.yml deleted file mode 100644 index e9fd96800..000000000 --- a/ansible/roles/k9dc/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# defaults file for k9dc \ No newline at end of file diff --git a/ansible/roles/k9dc/templates/docker-compose.yml.j2 b/ansible/roles/k9dc/templates/docker-compose.yml.j2 deleted file mode 100644 index 73b0f7afc..000000000 --- a/ansible/roles/k9dc/templates/docker-compose.yml.j2 +++ /dev/null @@ -1,14 +0,0 @@ -version: '3.1' -services: - match: - container_name: k9dc - image: vdonkor/tomcat9 - ports: - - 80:8080 - volumes: - - {{k9dc_home}}/logs:/usr/local/tomcat/logs - restart: always - environment: - - NEW_RELIC_LICENSE_KEY={{ new_relic_license_key }} - - NEW_RELIC_APP_NAME={{ app_name }} - diff --git a/ansible/roles/k9dc/vars/main.yml b/ansible/roles/k9dc/vars/main.yml deleted file mode 100644 index 8914817aa..000000000 --- a/ansible/roles/k9dc/vars/main.yml +++ /dev/null @@ -1,19 +0,0 @@ -$ANSIBLE_VAULT;1.1;AES256 -65653231383761633231666563383333333666666163313535616430363835656136373733326231 -3031663231346464373531353862633364656531316233300a386234336338633839646438343431 -33383561623738363065363636346538393731656132616635353830383266616333626532356263 -6137373964383464390a316436393931313531633837346336663536623435306263306562663765 
-63326637363531326261613162323464336165343734616237343434393335346331643764393338 -64363338666661336166303836333438613062613134636366653265643866306537386364633235 -64383361353039313937303962393465666533636564663736306530393536646566366238346661 -62363831633965373763666465646361353264386338633332313066613030643935366634636138 -30333964613266333335373631323232333536343730623839633662383861313261653630623130 -64383664303932333435633030373330383331313539376431626564666666333533366138663662 -33353936643665663261333563333035616631336662356533343035376466353431656662633230 -63663864316662366466303035333231363466376130386231663461346337376661643463666239 -37306564343135326334663962613261336432646232303939336362393530383761633665663466 -32393563343335646339316638636166633737316135323132303831386365363535666262636164 -36396431373261396634666132663162303762323662643938333934613236316135616662333534 -65396532333563366639346232613065333139666663353063363632386661646436663663323133 -62343461326261623834306531363935616664633764653335646631643464313161373236363331 -6336613432353563376133613939353130393738313462373731 diff --git a/ansible/roles/katalon/README.md b/ansible/roles/katalon/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/katalon/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) 
should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/katalon/defaults/main.yml b/ansible/roles/katalon/defaults/main.yml new file mode 100644 index 000000000..6e7243bcd --- /dev/null +++ b/ansible/roles/katalon/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for katalon +katalon_version: 7.2.6 +katalon_api_key: 1d9d7e79-dde9-45c1-9d21-b31424a47864 diff --git a/ansible/roles/katalon/handlers/main.yml b/ansible/roles/katalon/handlers/main.yml new file mode 100644 index 000000000..ae2752b01 --- /dev/null +++ b/ansible/roles/katalon/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for katalon \ No newline at end of file diff --git a/ansible/roles/katalon/meta/main.yml b/ansible/roles/katalon/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/katalon/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum 
Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/katalon/tasks/main.yml b/ansible/roles/katalon/tasks/main.yml new file mode 100644 index 000000000..7b254bc9a --- /dev/null +++ b/ansible/roles/katalon/tasks/main.yml @@ -0,0 +1,42 @@ +--- +# tasks file for katalon +- name: install firefox + yum: + name: + - firefox + - java-1.8.0-openjdk + state: installed + +- name: download katalon engine + get_url: + url: "{{ item }}" + dest: /tmp + with_items: + - https://github.com/katalon-studio/katalon-studio/releases/download/v{{katalon_version}}/Katalon_Studio_Engine_Linux_64-{{katalon_version}}.tar.gz + +- name: untar katalon engine + unarchive: + src: /tmp/Katalon_Studio_Engine_Linux_64-{{katalon_version}}.tar.gz + dest: /usr/local + remote_src: yes + +- name: remove the downloaded files + file: + path: "{{ item }}" + state: absent + with_items: + - /tmp/Katalon_Studio_Engine_Linux_64-{{katalon_version}}.tar.gz + +- name: create output directory + file: + path: "{{ workspace }}/results" + state: directory + +- name: execute katalon test script + shell: + cmd: > + /usr/local/Katalon_Studio_Engine_Linux_64-{{ katalon_version }}/katalonc + -noSplash -runMode=console -projectPath="{{ katalon_project }}" -retry=0 -testSuitePath="{{ katalon_suite_path }}" -browserType="Firefox (headless)" + -reportFolder="{{ workspace }}/results" -reportFileName="index" -apiKey={{ katalon_api_key }} + chdir: "{{ workspace }}" + register: katalon_out diff --git a/ansible/roles/katalon/tests/inventory b/ansible/roles/katalon/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/katalon/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/katalon/tests/test.yml b/ansible/roles/katalon/tests/test.yml new file mode 100644 index 000000000..10af078d8 --- /dev/null +++ b/ansible/roles/katalon/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - katalon \ No newline at end of file diff --git 
a/ansible/roles/katalon/vars/main.yml b/ansible/roles/katalon/vars/main.yml new file mode 100644 index 000000000..613f9796a --- /dev/null +++ b/ansible/roles/katalon/vars/main.yml @@ -0,0 +1,5 @@ +--- +# vars file for katalon +workspace: "{{ lookup('env','WORKSPACE') }}" +katalon_project: "{{ workspace }}/{{ lookup('env','KATALON_PRJ') }}" +katalon_suite_path: "{{ lookup('env','KATALON_SUITE_PATH') }}" \ No newline at end of file diff --git a/ansible/roles/migrations/README.md b/ansible/roles/migrations/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/migrations/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/README.md b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/defaults/main.yml b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/defaults/main.yml new file mode 100644 index 000000000..60b4b0aa0 --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for sumologic +timezone: 'Etc/EST' +additional_logs: [] \ No newline at end of file diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/handlers/main.yml b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/handlers/main.yml new file mode 100644 index 000000000..be869c71d --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for sumologic +- name: restart collector + service: + name: collector + state: restarted \ No newline at end of file diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/meta/main.yml b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. 
+ # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tasks/main.yml b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tasks/main.yml new file mode 100644 index 000000000..445d4923f --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tasks/main.yml @@ -0,0 +1,36 @@ +--- +# tasks file for sumologic + +- name: check if sumologic is installed + yum: + list: 'SumoCollector' + register: sumo_installed + +- name: download sumologic rpm + get_url: + url: https://collectors.sumologic.com/rest/download/rpm/64 + dest: /tmp/sumologic.rpm + when: sumo_installed.results == [] + +- name: Install SumoCollector + yum: + name: '/tmp/sumologic.rpm' + state: installed + when: sumo_installed.results == [] + +- name: copy user.properties and source configuration + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + group: sumologic_collector + with_items: + - {src: 'sources.json.j2',dest: '{{config}}/sources.json'} + - {src: 'user.properties.j2',dest: '{{config}}/user.properties'} + notify: restart collector + + +- name: Start service + service: + name: collector + state: started + enabled: yes diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/sources.json.j2 b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/sources.json.j2 new file mode 100644 index 000000000..4c96dd943 --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/sources.json.j2 @@ -0,0 +1,68 @@ +{ + "api.version": "v1", + "sources": [ + { + "name": "Linux Secure Log", + "description": "Security events and user logins", + "category": "{{env}}/OS/Linux", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + 
"pathExpression": "/var/log/secure*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux Message Log", + "description": "System events, such as user creation, deletion, system start, shutdown, etc", + "category": "{{env}}/OS/Linux", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/messages*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux dmesg", + "description": "Kernel messages", + "category": "{{env}}/OS/Linux", + "automaticDateParsing": false, + "multilineProcessingEnabled": true, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/dmesg", + "blacklist": [], + "sourceType": "LocalFile" + } +{% for log in additional_logs %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [{{ log.filters }}], + "encoding": "UTF-8", + "pathExpression": "{{ log.path }}", + "blacklist": [], + "sourceType": "LocalFile" + } +{% endfor %} + ] +} \ No newline at end of file diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/user.properties.j2 b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/user.properties.j2 new file mode 100644 index 000000000..3e3d93f77 --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/user.properties.j2 @@ -0,0 +1,5 @@ +name = {{ collector_name }} +accessid = {{ access_id }} +accesskey = {{ access_key }} +ephemeral = true +syncSources = 
/opt/SumoCollector/config/sources.json \ No newline at end of file diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/inventory b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/test.yml b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/test.yml new file mode 100644 index 000000000..9049b5d4a --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - sumologic \ No newline at end of file diff --git a/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/vars/main.yml b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/vars/main.yml new file mode 100644 index 000000000..c0ea57ccc --- /dev/null +++ b/ansible/roles/migrations/Users/vdonkor/icdc_devops/ansible/roles/sumologic/vars/main.yml @@ -0,0 +1,6 @@ +--- +# vars file for sumologic +accessid: "{{ access_id }}" +accesskey: "{{ access_key }}" +config: /opt/SumoCollector/config + diff --git a/ansible/roles/migrations/defaults/main.yml b/ansible/roles/migrations/defaults/main.yml new file mode 100644 index 000000000..60b4b0aa0 --- /dev/null +++ b/ansible/roles/migrations/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for sumologic +timezone: 'Etc/EST' +additional_logs: [] \ No newline at end of file diff --git a/ansible/roles/migrations/handlers/main.yml b/ansible/roles/migrations/handlers/main.yml new file mode 100644 index 000000000..be869c71d --- /dev/null +++ b/ansible/roles/migrations/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for 
sumologic +- name: restart collector + service: + name: collector + state: restarted \ No newline at end of file diff --git a/ansible/roles/migrations/meta/main.yml b/ansible/roles/migrations/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/migrations/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. 
Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/migrations/tasks/main.yml b/ansible/roles/migrations/tasks/main.yml new file mode 100644 index 000000000..c8bd64de1 --- /dev/null +++ b/ansible/roles/migrations/tasks/main.yml @@ -0,0 +1,22 @@ +--- +# tasks file for sumologic +- name: Stop collector service + service: + name: collector + state: stopped + +- name: remove cred directory + file: + path: "{{config}}/creds" + state: absent + +- name: copy user.properties and source configuration + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + group: sumologic_collector + with_items: + - {src: 'sources.json.j2',dest: '{{config}}/sources.json'} + - {src: 'user.properties.j2',dest: '{{config}}/user.properties'} + notify: restart collector + diff --git a/ansible/roles/migrations/templates/sources.json.j2 b/ansible/roles/migrations/templates/sources.json.j2 new file mode 100644 index 000000000..91d0c7fbe --- /dev/null +++ b/ansible/roles/migrations/templates/sources.json.j2 @@ -0,0 +1,91 @@ +{ + "api.version": "v1", + "sources": [ + { + "name": "Linux Secure Log", + "description": "Security events and user logins", + "category": "{{env}}/OS/Linux/secure", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/secure*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux Message Log", + "description": "System events, such as user creation, deletion, system start, shutdown, etc", + "category": "{{env}}/OS/Linux/message", + 
"automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/messages*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux dmesg", + "description": "Kernel messages", + "category": "{{env}}/OS/Linux/dmesg", + "automaticDateParsing": false, + "multilineProcessingEnabled": true, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/dmesg", + "blacklist": [], + "sourceType": "LocalFile" + } +{% for log in additional_logs %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [{{ log.filters }}], + "encoding": "UTF-8", + "pathExpression": "{{ log.path }}", + "blacklist": [], + "sourceType": "LocalFile" + } +{% endfor %} +{% for log in additional_logs_docker %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "uri": "unix:///var/run/docker.sock", + "specifiedContainers": [], + "allContainers": true, + "multilineProcessingEnabled": false, +{% if "Logs" in log.category %} + "collectEvents": true, + "sourceType": "DockerLog" +{% elif "Stats" in log.category %} + "certPath": "", + "sourceType": "DockerStats", + "pollInterval": 60000 + "automaticDateParsing": true, + "forceTimeZone": false, + "cutoffTimestamp": 0 +{% endif %} + } +{% endfor %} + ] +} \ No newline at end of file diff --git a/ansible/roles/migrations/templates/user.properties.j2 b/ansible/roles/migrations/templates/user.properties.j2 new file mode 100644 index 000000000..009548740 --- /dev/null +++ 
b/ansible/roles/migrations/templates/user.properties.j2 @@ -0,0 +1,6 @@ +name = {{ collector_name }} +accessid = {{ sumo_access_id }} +accesskey = {{ sumo_access_key }} +ephemeral = true +syncSources = /opt/SumoCollector/config/sources.json +skipAccessKeyRemoval = true diff --git a/ansible/roles/migrations/tests/inventory b/ansible/roles/migrations/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/migrations/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/migrations/tests/test.yml b/ansible/roles/migrations/tests/test.yml new file mode 100644 index 000000000..9049b5d4a --- /dev/null +++ b/ansible/roles/migrations/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - sumologic \ No newline at end of file diff --git a/ansible/roles/migrations/vars/main.yml b/ansible/roles/migrations/vars/main.yml new file mode 100644 index 000000000..64199c6eb --- /dev/null +++ b/ansible/roles/migrations/vars/main.yml @@ -0,0 +1,35 @@ +--- +# vars file for sumologic +config: /opt/SumoCollector/config +collector_name: "{{ collector_prefix }}-{{ inventory_hostname_short }}" +tomcat_logs: + - name: "{{ env }} {{ app_name }} Access Logs" + description: "{{ env }} {{ app_name}} Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}/Access" + path: "{{ log_path }}/localhost_access_log.*" + filters: "" + - name: "{{ env }} {{ app_name }} Catalina Logs" + description: "{{ env }} {{ app_name }} Catalina Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}/Catalina" + path: "{{ log_path }}/catalina.*" + filters: "" + - name: "{{ env }} {{ app_name }} Localhost Logs" + description: "{{ env }} {{ app_name }} Localhost Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}/Localhost" + path: "{{ log_path }}/localhost.*" + filters: "" +docker_logs: + - name: "{{ env }} Docker Logs" + description: "{{ env }} Docker Logs" + category: "{{ env }}/Docker/Logs" + - name: "{{ env }} 
Docker Stats" + description: "{{ env }} Docker Stats" + category: "{{ env }}/Docker/Stats" +neo4j_logs: + - name: "{{ env }} {{ app_name }} Logs" + description: "{{ env }} {{ app_name}} Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}" + path: "{{ log_path }}/*" + filters: "" +additional_logs: "{{ tomcat_logs if app_name == 'Tomcat' else neo4j_logs }}" +additional_logs_docker: "{{ docker_logs if app_name == 'Tomcat' else '' }}" \ No newline at end of file diff --git a/terraform/icdc/roles/setup-nat-instance/tasks/main.yml b/ansible/roles/nat/tasks/main.yml similarity index 100% rename from terraform/icdc/roles/setup-nat-instance/tasks/main.yml rename to ansible/roles/nat/tasks/main.yml diff --git a/ansible/roles/neo4j-loader/.travis.yml b/ansible/roles/neo4j-loader/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/neo4j-loader/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/README.md b/ansible/roles/neo4j-loader/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/neo4j-loader/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/neo4j-loader/defaults/main.yml b/ansible/roles/neo4j-loader/defaults/main.yml new file mode 100644 index 000000000..2e66705dd --- /dev/null +++ b/ansible/roles/neo4j-loader/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for neo4j-loader +redis_host: + dev: "bento-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + qa: "bento-qa-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + stage: "bento-stage-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + prod: "bento-prod-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" diff --git a/ansible/roles/neo4j-loader/handlers/main.yml b/ansible/roles/neo4j-loader/handlers/main.yml new file mode 100644 index 000000000..577beaf0b --- /dev/null +++ b/ansible/roles/neo4j-loader/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for neo4j-loader \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/meta/main.yml b/ansible/roles/neo4j-loader/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/neo4j-loader/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/tasks/bento-restart-backend.yml b/ansible/roles/neo4j-loader/tasks/bento-restart-backend.yml new file mode 100644 index 000000000..0f57dea1a --- /dev/null +++ b/ansible/roles/neo4j-loader/tasks/bento-restart-backend.yml @@ -0,0 +1,37 @@ +--- +############################################################################################################################ + +# Restart the backend container to reload schema + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: bento-{{ tier }} + service: bento-{{ tier }}-backend + details: true + region: "{{ region }}" + register: service_backend + +- name: set backend facts + set_fact: + backend_task_definition: "{{ service_backend.services[0].taskDefinition }}" + lb_backend: "{{ service_backend.services[0].loadBalancers }}" + role_arn: "{{ service_backend.services[0].roleArn }}" + +- name: recreate backend service + ecs_service: + state: present + name: bento-{{ tier }}-backend + cluster: bento-{{ tier }} + task_definition: "{{ backend_task_definition }}" + 
role: "{{ role_arn }}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{ region }}" + register: service_backend_output + diff --git a/ansible/roles/neo4j-loader/tasks/icdc-restart-backend.yml b/ansible/roles/neo4j-loader/tasks/icdc-restart-backend.yml new file mode 100644 index 000000000..8a15a5837 --- /dev/null +++ b/ansible/roles/neo4j-loader/tasks/icdc-restart-backend.yml @@ -0,0 +1,30 @@ +--- +############################################################################################################################ + +# Restart the backend container to reload schema + +############################################################################################################################ + +- name: query backend container status + shell: docker ps -f name=backend | awk '{print $F}' + register: status_backend + +- name: echo backend status + debug: + msg: "{{status_backend.stdout_lines}}" + +- name: restart backend container + shell: docker restart backend + register: restart_backend_output + +- name: restart output + debug: + msg: "{{restart_backend_output.stdout_lines}}" + +- name: query backend container status + shell: docker ps -f name=backend | awk '{print $F}' + register: status_backend + +- name: echo backend status + debug: + msg: "{{status_backend.stdout_lines}}" \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/tasks/main.yml b/ansible/roles/neo4j-loader/tasks/main.yml new file mode 100644 index 000000000..408689139 --- /dev/null +++ b/ansible/roles/neo4j-loader/tasks/main.yml @@ -0,0 +1,117 @@ +--- +# tasks file for neo4j-loader + +#- name: install redis +# yum: +# name: +# - redis +# state: present + +#- name: install pip +# yum: +# name: +# - epel-release +# - python2-pip +# state: present + +#- name: install required packages +# pip: +# name: +# - awscli +# - boto3 +# state: present + +- name: 
ensure that remote workspace exists + file: + path: "{{remote_workspace}}" + state: directory + +- name: ensure backup directory exists + file: + path: /backups + state: directory + owner: neo4j + group: neo4j + +# - name: download dump files +# command: aws s3 cp {{s3_bucket_name}}/{{dump_file_name}} "{{remote_workspace}}" + +- name: download dump file + aws_s3: + bucket: "{{s3_bucket_name}}" + object: "/dump_files/{{dump_file_name}}" + dest: "{{remote_workspace}}/neo4j.dump" + mode: get + +- name: stop neo4j + service: + name: neo4j + state: stopped + +- name: backup neo4j db + command: /var/lib/neo4j/bin/neo4j-admin dump --database=graph.db --to={{backup_directory}}/neo4j-backup-{{timestamp}}.dump + become_user: neo4j + +- name: stop neo4j + service: + name: neo4j + state: stopped + +- name: load neo4j dumpfile + command: /var/lib/neo4j/bin/neo4j-admin load --from {{remote_workspace}}/neo4j.dump --database graph.db --force + become_user: neo4j + register: loader + +- name: loader output + debug: + msg: "{{loader.stdout_lines}}" + +# - name: flush redis cache +# shell: echo -e "get abc \nFLUSHALL" | redis-cli -h {{ redis_host[tier]}} -p 6379 -c + +- name: start neo4j + service: + name: neo4j + state: started + + +############################################################################################################################ + +# Restart the backend container to reload schema + +############################################################################################################################ + +- name: query backend service + ecs_service_info: + cluster: bento-{{ tier }} + service: bento-{{ tier }}-backend + details: true + region: "{{ region }}" + register: service_backend + +############################################################################################################################ + +- name: set backend facts + set_fact: + backend_task_definition: "{{ service_backend.services[0].taskDefinition }}" + lb_backend: "{{ 
service_backend.services[0].loadBalancers }}" + role_arn: "{{ service_backend.services[0].roleArn }}" + +############################################################################################################################ + +- name: recreate backend service + ecs_service: + state: present + name: bento-{{ tier }}-backend + cluster: bento-{{ tier }} + task_definition: "{{ backend_task_definition }}" + role: "{{ role_arn }}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{ region }}" + register: service_backend_output + diff --git a/ansible/roles/neo4j-loader/tasks/neo4j-loader-icdc-get.yml b/ansible/roles/neo4j-loader/tasks/neo4j-loader-icdc-get.yml new file mode 100644 index 000000000..045eba3dc --- /dev/null +++ b/ansible/roles/neo4j-loader/tasks/neo4j-loader-icdc-get.yml @@ -0,0 +1,18 @@ +--- +# tasks file for neo4j-loader + +- name: download dump file + aws_s3: + bucket: "{{ s3_bucket_name }}" + object: "/dump_files/{{ dump_file_name }}" + dest: "{{ dump_file_name }}" + mode: get + +- name: verify file is copied + stat: + path: "{{ dump_file_name }}" + register: file_status + +#- name: stat output +# debug: +# msg: "{{ file_status }}" \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/tasks/neo4j-loader-icdc.yml b/ansible/roles/neo4j-loader/tasks/neo4j-loader-icdc.yml new file mode 100644 index 000000000..5823db56e --- /dev/null +++ b/ansible/roles/neo4j-loader/tasks/neo4j-loader-icdc.yml @@ -0,0 +1,56 @@ +--- +# tasks file for neo4j-loader + +- name: ensure that remote workspace exists + file: + path: "{{ remote_workspace }}" + state: directory + +- name: ensure backup directory exists + file: + path: /backups + state: directory + owner: neo4j + group: neo4j + +- name: copy dump file + copy: + src: "{{ dump_file_name }}" + dest: "{{ remote_workspace }}/neo4j.dump" + +- name: verify file is copied + stat: + 
path: "{{ remote_workspace }}/neo4j.dump" + register: file_status + +#- name: stat output +# debug: +# msg: "{{ file_status }}" + +- name: stop neo4j + service: + name: neo4j + state: stopped + +- name: backup neo4j db + command: neo4j-admin dump --database=neo4j --to={{backup_directory}}/neo4j-backup-{{timestamp}}.dump + become_user: neo4j + +- name: stop neo4j + service: + name: neo4j + state: stopped + +- name: load neo4j dumpfile + command: neo4j-admin load --from {{remote_workspace}}/neo4j.dump --force + become_user: neo4j + register: loader + +- name: loader output + debug: + msg: "{{loader.stdout_lines}}" + +- name: start neo4j + service: + name: neo4j + state: started \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/tasks/neo4j-loader.yml b/ansible/roles/neo4j-loader/tasks/neo4j-loader.yml new file mode 100644 index 000000000..c7a6e6115 --- /dev/null +++ b/ansible/roles/neo4j-loader/tasks/neo4j-loader.yml @@ -0,0 +1,113 @@ +--- +# tasks file for neo4j-loader + +- name: ensure that remote workspace exists + file: + path: "{{remote_workspace}}" + state: directory + +- name: ensure backup directory exists + file: + path: /backups + state: directory + owner: neo4j + group: neo4j + +- name: copy dump file + copy: + src: "{{ dump_file_name }}" + dest: "{{ remote_workspace }}/neo4j.dump" + owner: neo4j + group: neo4j + +- name: verify file is copied + stat: + path: "{{ remote_workspace }}/neo4j.dump" + register: file_status + +#- name: stat output +# debug: +# msg: "{{ file_status }}" + +- name: stop neo4j + service: + name: neo4j + state: stopped + +- name: backup neo4j db + command: "{{ neo4j_admin_cmd }} dump --database={{ neo4j_db_name }} --to={{backup_directory}}/neo4j-backup-{{timestamp}}.dump" + become_user: neo4j + +- name: stop neo4j + service: + name: neo4j + state: stopped + +#- name: neo4j db name +# debug: +# msg: "{{ neo4j_db_name }}" + +#- name: neo4j load command +# debug: +# msg: "{{ neo4j_admin_cmd }} load --from={{ 
remote_workspace }}/neo4j.dump --database={{ neo4j_db_name }} --force --verbose" + +- name: load neo4j dumpfile + #command: "{{ neo4j_admin_cmd }} load --from={{ remote_workspace }}/neo4j.dump --database={{ neo4j_db_name }} --force --verbose" + command: "{{ neo4j_admin_cmd }} load --from={{ remote_workspace }}/neo4j.dump --force --verbose" + become_user: neo4j + register: loader + +- name: loader output + debug: + msg: "{{loader.stdout_lines}}" + +- name: start neo4j + service: + name: neo4j + state: started + +#- name: remove the remote workspace +# file: +# path: "{{remote_workspace}}" +# state: absent + +############################################################################################################################ + +# Restart the backend container to reload schema + +############################################################################################################################ + +#- name: query backend service +# ecs_service_info: +# cluster: bento-{{ tier }} +# service: bento-{{ tier }}-backend +# details: true +# region: "{{ region }}" +# register: service_backend + +############################################################################################################################ + +#- name: set backend facts +# set_fact: +# backend_task_definition: "{{ service_backend.services[0].taskDefinition }}" +# lb_backend: "{{ service_backend.services[0].loadBalancers }}" +# role_arn: "{{ service_backend.services[0].roleArn }}" + +############################################################################################################################ + +#- name: recreate backend service +# ecs_service: +# state: present +# name: bento-{{ tier }}-backend +# cluster: bento-{{ tier }} +# task_definition: "{{ backend_task_definition }}" +# role: "{{ role_arn }}" +# force_new_deployment: yes +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# load_balancers: "{{ lb_backend }}" +# 
region: "{{ region }}" +# register: service_backend_output + diff --git a/ansible/roles/neo4j-loader/tests/inventory b/ansible/roles/neo4j-loader/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/neo4j-loader/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/neo4j-loader/tests/test.yml b/ansible/roles/neo4j-loader/tests/test.yml new file mode 100644 index 000000000..2ac52d45f --- /dev/null +++ b/ansible/roles/neo4j-loader/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - neo4j-loader \ No newline at end of file diff --git a/ansible/roles/neo4j-loader/vars/main.yml b/ansible/roles/neo4j-loader/vars/main.yml new file mode 100644 index 000000000..95a29b7ef --- /dev/null +++ b/ansible/roles/neo4j-loader/vars/main.yml @@ -0,0 +1,14 @@ +--- +# vars file for neo4j-loader +backup_directory: /backups +remote_workspace: /tmp/neo4j +s3_bucket_name: "{{ lookup('env','S3_BUCKET') }}" +dump_file_name: "{{ lookup('env','DUMP_FILE') }}" +timestamp: "{{ lookup('pipe','date +%Y-%m-%d-%H-%M-%S') }}" + +tier: "{{ lookup('env','TIER') }}" +region: us-east-1 + +#neo4j_db_name: "{{ 'neo4j' if tier == 'icdc' else 'graph.db' }}" +neo4j_db_name: neo4j +neo4j_admin_cmd: 'neo4j-admin' \ No newline at end of file diff --git a/ansible/roles/neo4j-version-update/README.md b/ansible/roles/neo4j-version-update/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/neo4j-version-update/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/neo4j-version-update/defaults/main.yml b/ansible/roles/neo4j-version-update/defaults/main.yml new file mode 100644 index 000000000..199effa31 --- /dev/null +++ b/ansible/roles/neo4j-version-update/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file +neo4j_edition: 'community' \ No newline at end of file diff --git a/ansible/roles/neo4j-version-update/handlers/main.yml b/ansible/roles/neo4j-version-update/handlers/main.yml new file mode 100644 index 000000000..5a1e9d0a3 --- /dev/null +++ b/ansible/roles/neo4j-version-update/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file +- name: yum-clean-metadata + command: yum clean metadata + args: + warn: no \ No newline at end of file diff --git a/ansible/roles/neo4j-version-update/meta/main.yml b/ansible/roles/neo4j-version-update/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/neo4j-version-update/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. 
+ #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/neo4j-version-update/tasks/main.yml b/ansible/roles/neo4j-version-update/tasks/main.yml new file mode 100644 index 000000000..25eb485a7 --- /dev/null +++ b/ansible/roles/neo4j-version-update/tasks/main.yml @@ -0,0 +1,147 @@ +--- +# tasks file for neo4j update +- name: Remove neo4j repository (and clean up left-over metadata) + yum_repository: + name: neo4j + state: absent + notify: yum-clean-metadata + +- name: add neo4j repository + yum_repository: + name: neo4j + description: neo4j repository + file: neo4j + baseurl: https://yum.neo4j.com/stable + gpgcheck: yes + enabled: yes + +- name: disable and stop neo4j + service: + name: neo4j + state: stopped + enabled: no + +- name: create neo4j cert backup directory + file: + path: /tmp/neo4j_certs + state: directory + +# - name: copy neo4j certs +# copy: +# remote_src: yes +# src: "{{ item.src }}" +# dest: "{{ item.dest }}" +# owner: neo4j +# group: neo4j +# with_items: +# - {src: 
'/var/lib/neo4j/certificates/neo4j.cert',dest: '/tmp/neo4j_certs/neo4j.cert'} +# - {src: '/var/lib/neo4j/certificates/neo4j.key',dest: '/tmp/neo4j_certs/neo4j.key'} + +#- name: verify installed neo4j version +# shell: yum list installed neo4j* +# register: neo4j_installed + +#- name: version output +# debug: +# msg: "{{ neo4j_installed.stdout_lines }}" + +- name: remove neo4j + shell: yum -y autoremove neo4j* java* + +- name: clean old neo4j files + file: + path: "{{ item.path }}" + state: absent + + with_items: + - {path: '/var/lib/neo4j/'} + - {path: '/etc/neo4j/'} + +- name: install neo4j community {{ neo4j_version }} and java 11 JRE + yum: + name: + - java-11-openjdk + - neo4j-{{ neo4j_version }} + state: installed + disable_gpg_check: yes + when: neo4j_edition == 'community' + +- name: install neo4j {{ neo4j_version }} + shell: NEO4J_ACCEPT_LICENSE_AGREEMENT=yes yum -y install neo4j-{{ neo4j_version }} + when: neo4j_edition == 'enterprise' + +#- name: verify installed neo4j version +# shell: yum list installed neo4j* +# register: neo4j_installed + +#- name: version output +# debug: +# msg: "{{ neo4j_installed.stdout_lines }}" + +- name: copy neo4j cert and key + copy: + remote_src: yes + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: neo4j + group: neo4j + with_items: + - {src: /tmp/neo4j_certs/neo4j.cert ,dest: /var/lib/neo4j/certificates/neo4j.cert} + - {src: /tmp/neo4j_certs/neo4j.key,dest: /var/lib/neo4j/certificates/neo4j.key} + +- name: change neo4j parameters + lineinfile: + path: /etc/neo4j/neo4j.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + with_items: + #- { regexp: '^#dbms.memory.heap.initial_size=512m', line: 'dbms.memory.heap.initial_size=5632m' } + #- { regexp: '^#dbms.memory.heap.max_size=512m', line: 'dbms.memory.heap.max_size=5632m' } + - { regexp: '^#dbms.allow_upgrade=true', line: 'dbms.allow_upgrade=true' } + - { regexp: '^#dbms.default_listen_address=0.0.0.0', line: 
'dbms.default_listen_address=0.0.0.0' } + - { regexp: '^#dbms.connector.bolt.tls_level=DISABLED', line: 'dbms.connector.bolt.tls_level=OPTIONAL' } + - { regexp: '^#dbms.connector.bolt.listen_address=:7687', line: 'dbms.connector.bolt.listen_address=:7687' } + - { regexp: '^#dbms.ssl.policy.bolt.enabled=true', line: 'dbms.ssl.policy.bolt.enabled=true' } + - { regexp: '^#dbms.ssl.policy.bolt.base_directory=certificates/bolt', line: 'dbms.ssl.policy.bolt.base_directory=certificates' } + - { regexp: '^#dbms.ssl.policy.bolt.private_key=private.key', line: 'dbms.ssl.policy.bolt.private_key=neo4j.key' } + - { regexp: '^#dbms.ssl.policy.bolt.public_certificate=public.crt', line: 'dbms.ssl.policy.bolt.public_certificate=neo4j.cert' } + - { regexp: '^#dbms.ssl.policy.bolt.client_auth=NONE', line: 'dbms.ssl.policy.bolt.client_auth=NONE' } + #- { regexp: '^#dbms.backup.enabled=true', line: 'dbms.backup.enabled=true' } + #- { regexp: '^#dbms.logs.query.page_logging_enabled=true', line: 'dbms.logs.query.page_logging_enabled=true' } + #- { regexp: '^#dbms.logs.security.level=INFO', line: 'dbms.logs.security.level=INFO' } + #- { regexp: '^#dbms.allow_upgrade=true', line: 'dbms.allow_upgrade=true' } + #- { regexp: '^#dbms.logs.query.enabled=true', line: 'dbms.logs.query.enabled=true' } + #- { regexp: '^#dbms.logs.query.time_logging_enabled=true', line: 'dbms.logs.query.time_logging_enabled=true' } + #- { regexp: '^#dbms.logs.query.parameter_logging_enabled=true', line: 'dbms.logs.query.parameter_logging_enabled=true' } + +#- name: Enable neo4j metrics +# lineinfile: +# state: present +# path: /etc/neo4j/neo4j.conf +# insertafter: 'EOF' +# line: '{{ item }}' +# loop: +# - metrics.enabled=true +# - metrics.neo4j.enabled=true +# - metrics.neo4j.tx.enabled=true +# - metrics.neo4j.pagecache.enabled=true +# - metrics.neo4j.counts.enabled=true +# - metrics.neo4j.network.enabled=true + +- name: add APOC + copy: + remote_src: yes + src: "/var/lib/neo4j/labs/apoc-{{ apoc_version }}.jar" + 
dest: "/var/lib/neo4j/plugins/apoc-{{ apoc_version }}.jar" + owner: neo4j + group: neo4j + +- name: update default neo4j password + shell: "neo4j-admin set-initial-password {{ neo4j_password }}" + +- name: start and enable neo4j service + service: + name: neo4j + state: started + enabled: yes \ No newline at end of file diff --git a/ansible/roles/neo4j-version-update/tests/inventory b/ansible/roles/neo4j-version-update/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/neo4j-version-update/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/neo4j-version-update/tests/test.yml b/ansible/roles/neo4j-version-update/tests/test.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/ansible/roles/neo4j-version-update/tests/test.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/neo4j-version-update/vars/main.yml b/ansible/roles/neo4j-version-update/vars/main.yml new file mode 100644 index 000000000..fa070d826 --- /dev/null +++ b/ansible/roles/neo4j-version-update/vars/main.yml @@ -0,0 +1,6 @@ +--- +# vars file for neo4j +neo4j_password: "{{ lookup('env','NEO4J_PASS') }}" +neo4j_version: "{{ 'enterprise-4.2.13' if neo4j_edition == 'enterprise' else '4.2.13' }}" +#neo4j_version: '4.2.8' +apoc_version: "4.2.0.9-core" \ No newline at end of file diff --git a/ansible/roles/neo4j/defaults/main.yml b/ansible/roles/neo4j/defaults/main.yml index 48311bef4..692fb0df3 100644 --- a/ansible/roles/neo4j/defaults/main.yml +++ b/ansible/roles/neo4j/defaults/main.yml @@ -1,11 +1,4 @@ -$ANSIBLE_VAULT;1.1;AES256 -36316364393631633564383639633663346236613734656531366139393531373764636137323033 -3432383063336435643162353035636264393662306531340a636133373231613464376235313932 -30666164666163366236623966303964656263393265623136343230303130653634333765613236 -6433376537343361350a396333623339376530663262656331353562323432623061343032363161 
-37393065313836616366333632376464333665396161333235326238643264373866613038326266 -35666236393061313136636438326633336433333435633964663038613665626665396634393438 -61396538633738656261616563303937383033356637366533366565623838353062323234303936 -31663834376339326237343861616538333662666433303433656633613438336239636335666364 -31653439383130313161623234373735363664353830333666366439393631323234303035323462 -3235356365333961333633623536376161373463313133653939 +--- +# defaults file for neo4j +newrelic: yes +neo4j_home: /var/lib/neo4j diff --git a/ansible/roles/neo4j/tasks/main.yml b/ansible/roles/neo4j/tasks/main.yml index a5ef793ca..90c0ad241 100644 --- a/ansible/roles/neo4j/tasks/main.yml +++ b/ansible/roles/neo4j/tasks/main.yml @@ -5,8 +5,10 @@ yum: name: - firewalld - - http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + - epel-release + - unzip state: latest + disable_gpg_check: yes - name: enable and start firewalld service: @@ -42,19 +44,6 @@ state: started enabled: yes -- name: configure file decriptors for neo4j - pam_limits: - domain: neo4j - limit_type: "{{item.ltype}}" - limit_item: "{{item.litem}}" - value: "{{item.lvalue}}" - with_items: - - { ltype: 'soft', litem: 'nofile', lvalue: 40000 } - - { ltype: 'hard', litem: 'nofile', lvalue: 40000 } - -- name: reload sysctl - command: sysctl --system - - name: open neo4j data ports firewalld: port: "{{item}}/tcp" @@ -102,6 +91,13 @@ with_fileglob: - "{{neo4j_home}}/certificates/neo4j*" +# - name: copy neo4j.conf to /etc/neo4j/neo4j.conf +# template: +# src: neo4j.conf.j2 +# dest: /etc/neo4j/neo4j.conf +# owner: neo4j +# group: neo4j + # - name: copy neo4j cert and key # template: # src: "{{ item.src }}" @@ -125,17 +121,18 @@ path: /etc/neo4j/neo4j.conf regexp: "{{ item.regexp }}" line: "{{ item.line }}" + state: present with_items: - { regexp: '^#dbms.connectors.default_listen_address=0.0.0.0', line: 'dbms.connectors.default_listen_address=0.0.0.0' } # - { regexp: 
'^#dbms.connector.bolt.listen_address=:7687', line: 'dbms.connector.bolt.listen_address=0.0.0.0:7687' } - - { regexp: '^#bolt.ssl_policy=legacy', line: 'bolt.ssl_policy=default' } - - { regexp: '^#dbms.ssl.policy.default.base_directory=certificates/default', line: 'dbms.ssl.policy.default.base_directory=/var/lib/neo4j/certificates' } - - { regexp: '#dbms.ssl.policy.default.allow_key_generation=false', line: 'dbms.ssl.policy.default.allow_key_generation=false' } - - { regexp: '^#dbms.ssl.policy.default.private_key=', line: 'dbms.ssl.policy.default.private_key=/var/lib/neo4j/certificates/neo4j.key' } - - { regexp: '^#dbms.ssl.policy.default.public_certificate=', line: 'dbms.ssl.policy.default.public_certificate=/var/lib/neo4j/certificates/neo4j.cert' } - - { regexp: '^#dbms.ssl.policy.default.client_auth=require', line: 'dbms.ssl.policy.default.client_auth=NONE' } - - { regexp: '^#dbms.ssl.policy.default.revoked_dir=', line: 'dbms.ssl.policy.default.revoked_dir=/var/lib/neo4j/certificates/revoked' } - - { regexp: '^#dbms.ssl.policy.default.trusted_dir=', line: 'dbms.ssl.policy.default.trusted_dir=/var/lib/neo4j/certificates/trusted' } + # - { regexp: '^#bolt.ssl_policy=legacy', line: 'bolt.ssl_policy=default' } + # - { regexp: '^#dbms.ssl.policy.default.base_directory=certificates/default', line: 'dbms.ssl.policy.default.base_directory=/var/lib/neo4j/certificates' } + # - { regexp: '#dbms.ssl.policy.default.allow_key_generation=false', line: 'dbms.ssl.policy.default.allow_key_generation=false' } + # - { regexp: '^#dbms.ssl.policy.default.private_key=', line: 'dbms.ssl.policy.default.private_key=/var/lib/neo4j/certificates/neo4j.key' } + # - { regexp: '^#dbms.ssl.policy.default.public_certificate=', line: 'dbms.ssl.policy.default.public_certificate=/var/lib/neo4j/certificates/neo4j.cert' } + # - { regexp: '^#dbms.ssl.policy.default.client_auth=require', line: 'dbms.ssl.policy.default.client_auth=NONE' } + # - { regexp: '^#dbms.ssl.policy.default.revoked_dir=', line: 
'dbms.ssl.policy.default.revoked_dir=/var/lib/neo4j/certificates/revoked' } + # - { regexp: '^#dbms.ssl.policy.default.trusted_dir=', line: 'dbms.ssl.policy.default.trusted_dir=/var/lib/neo4j/certificates/trusted' } - { regexp: '^#dbms.backup.enabled=true', line: 'dbms.backup.enabled=true' } - { regexp: '^#dbms.logs.query.page_logging_enabled=true', line: 'dbms.logs.query.page_logging_enabled=true' } - { regexp: '^#dbms.logs.security.level=INFO', line: 'dbms.logs.security.level=INFO' } @@ -144,9 +141,9 @@ - { regexp: '^#dbms.logs.query.time_logging_enabled=true', line: 'dbms.logs.query.time_logging_enabled=true' } - { regexp: '^#dbms.logs.query.parameter_logging_enabled=true', line: 'dbms.logs.query.parameter_logging_enabled=true' } - -- name: Ensure the default Apache port is 8080 +- name: Enable neo4j metrics lineinfile: + state: present path: /etc/neo4j/neo4j.conf insertafter: 'EOF' line: '{{ item }}' @@ -164,11 +161,9 @@ line: 'dbms.unmanaged_extension_classes=org.neo4j.graphql=/graphql' regex: 'dbms.directories.data=/var/lib/neo4j/data' insertbefore: 'BOF' + state: present notify: restart neo4j -# - name: change admin password -# command: "curl -H \"Content-Type: application/json\" -XPOST -d '{\"password\":\"{{neo4j_password}}\"}' -u neo4j:neo4j http://localhost:7474/user/neo4j/password" - - name: newrelic task include_tasks: newrelic.yml when: newrelic is defined \ No newline at end of file diff --git a/ansible/roles/neo4j/tasks/newrelic.yml b/ansible/roles/neo4j/tasks/newrelic.yml index a3c55f750..bc6701afd 100644 --- a/ansible/roles/neo4j/tasks/newrelic.yml +++ b/ansible/roles/neo4j/tasks/newrelic.yml @@ -1,47 +1,87 @@ -- name: download nodejs rpm script +# - name: download nodejs rpm script +# get_url: +# url: https://rpm.nodesource.com/setup_{{node_version}}.x +# dest: /tmp/nodejs.sh +# mode: 0755 + +# - name: run the nodejs.sh script +# shell: /tmp/nodejs.sh + +# - name: cleanup nodejs.sh file +# file: +# path: /tmp/nodejs.sh +# state: absent + +# - 
name: install nodes and npm +# yum: +# name: nodejs +# state: installed + +# - name: install newrelic neo4j plugin +# npm: +# name: newrelic-neo4j +# global: yes +# unsafe_perm: yes + +# - name: configure newrelic-neo4j +# template: +# src: newrelic-neo4j.js.j2 +# dest: /etc/newrelic/newrelic-neo4j.js + +# - name: configure newrelic-neo4j service +# template: +# src: newrelic-neo4j.service.j2 +# dest: /etc/systemd/system/newrelic-neo4j.service + +# - name: reload systemd +# systemd: +# daemon_reload: yes + +# - name: enable and start newrelic-neo4j +# systemd: +# name: newrelic-neo4j +# state: started +# enabled: yes + +# - name: run newrelic-neo4j +# command: newrelic-neo4j >/dev/null 2>&1 & + +- name: download newrelic apm agent get_url: - url: https://rpm.nodesource.com/setup_{{node_version}}.x - dest: /tmp/nodejs.sh - mode: 0755 + url: http://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip + dest: /var/lib/neo4j -- name: run the nodejs.sh script - shell: /tmp/nodejs.sh +- name: unzip newrelic-java.zip + unarchive: + src: /var/lib/neo4j/newrelic-java.zip + dest: /var/lib/neo4j + remote_src: yes + owner: neo4j + group: neo4j -- name: cleanup nodejs.sh file +- name: remove newrelic-java.zip file: - path: /tmp/nodejs.sh + path: /var/lib/neo4j/newrelic-java.zip state: absent + +- name: update newrelic.yml with license info + lineinfile: + path: /var/lib/neo4j/newrelic/newrelic.yml + regexp: '^ license_key' + line: ' license_key: {{newrelic_license_key}}' + state: present -- name: install nodes and npm - yum: - name: nodejs - state: installed - -- name: install newrelic neo4j plugin - npm: - name: newrelic-neo4j - global: yes - unsafe_perm: yes - -- name: configure newrelic-neo4j - template: - src: newrelic-neo4j.js.j2 - dest: /etc/newrelic/newrelic-neo4j.js - -- name: configure newrelic-neo4j service - template: - src: newrelic-neo4j.service.j2 - dest: /etc/systemd/system/newrelic-neo4j.service - -- name: reload systemd - systemd: 
- daemon_reload: yes - -- name: enable and start newrelic-neo4j - systemd: - name: newrelic-neo4j - state: started - enabled: yes - -# - name: run newrelic-neo4j -# command: newrelic-neo4j >/dev/null 2>&1 & \ No newline at end of file +- name: update newrelic.yml with app_name + lineinfile: + path: /var/lib/neo4j/newrelic/newrelic.yml + insertafter: '# The first application name must be unique.' + regexp: '^ app_name: My Application$' + state: present + line: ' app_name: {{env}}-neo4j' + +- name: change neo4j parameters + lineinfile: + path: /etc/neo4j/neo4j.conf + line: dbms.jvm.additional=-javaagent:/var/lib/neo4j/newrelic/newrelic.jar + state: present + notify: restart neo4j \ No newline at end of file diff --git a/ansible/roles/neo4j/templates/neo4j.conf.j2 b/ansible/roles/neo4j/templates/neo4j.conf.j2 new file mode 100644 index 000000000..faa235325 --- /dev/null +++ b/ansible/roles/neo4j/templates/neo4j.conf.j2 @@ -0,0 +1,816 @@ + +#***************************************************************** +# Neo4j configuration +# +# For more details and a complete list of settings, please see +# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/ +#***************************************************************** + +# The name of the database to mount. Note that this is *not* to be confused with +# the causal_clustering.database setting, used to specify a logical database +# name when creating a multi-clustering deployment. +#dbms.active_database=graph.db + +# Paths of directories in the installation. +dbms.unmanaged_extension_classes=org.neo4j.graphql=/graphql +dbms.directories.plugins=/var/lib/neo4j/plugins +dbms.directories.certificates=/var/lib/neo4j/certificates +dbms.directories.logs=/var/log/neo4j +dbms.directories.lib=/usr/share/neo4j/lib +dbms.directories.run=/var/run/neo4j +dbms.directories.metrics=/var/lib/neo4j/metrics + +# This setting constrains all `LOAD CSV` import files to be under the `import` directory. 
Remove or comment it out to +# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the +# `LOAD CSV` section of the manual for details. +dbms.directories.import=/var/lib/neo4j/import + +# Whether requests to Neo4j are authenticated. +# To disable authentication, uncomment this line +#dbms.security.auth_enabled=false + +# Enable this to be able to upgrade a store from an older version. +#dbms.allow_upgrade=true + +# Java Heap Size: by default the Java heap size is dynamically +# calculated based on available system resources. +# Uncomment these lines to set specific initial and maximum +# heap size. +dbms.memory.heap.initial_size={{heap_min_size}} +dbms.memory.heap.max_size={{heap_max_size}} + +# The amount of memory to use for mapping the store files, in bytes (or +# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g'). +# If Neo4j is running on a dedicated server, then it is generally recommended +# to leave about 2-4 gigabytes for the operating system, give the JVM enough +# heap to hold all your transaction state and query context, and then leave the +# rest for the page cache. +# The default page cache memory assumes the machine is dedicated to running +# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size. +#dbms.memory.pagecache.size=10g + +# Enable online backups to be taken from this database. +dbms.backup.enabled=true + +# By default the backup service will only listen on localhost. +# To enable remote backups you will have to bind to an external +# network interface (e.g. 0.0.0.0 for all interfaces). +# The protocol running varies depending on deployment. In a Causal Clustering environment this is the +# same protocol that runs on causal_clustering.transaction_listen_address. 
+#dbms.backup.address=0.0.0.0:6362 + +# Enable encryption on the backup service for CC instances (does not work for single-instance or HA clusters) +#dbms.backup.ssl_policy=backup + +#***************************************************************** +# Network connector configuration +#***************************************************************** + +# With default configuration Neo4j only accepts local connections. +# To accept non-local connections, uncomment this line: +dbms.connectors.default_listen_address=0.0.0.0 + +# You can also choose a specific network interface, and configure a non-default +# port for each connector, by setting their individual listen_address. + +# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or +# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for +# individual connectors below. +#dbms.connectors.default_advertised_address=localhost + +# You can also choose a specific advertised hostname or IP address, and +# configure an advertised port for each connector, by setting their +# individual advertised_address. + +# Bolt connector +dbms.connector.bolt.enabled=true +#dbms.connector.bolt.tls_level=OPTIONAL +#dbms.connector.bolt.listen_address=:7687 + +# HTTP Connector. There can be zero or one HTTP connectors. +dbms.connector.http.enabled=true +#dbms.connector.http.listen_address=:7474 + +# HTTPS Connector. There can be zero or one HTTPS connectors. +dbms.connector.https.enabled=true +#dbms.connector.https.listen_address=:7473 + +# Number of Neo4j worker threads. +#dbms.threads.worker_count= + +#***************************************************************** +# SSL system configuration +#***************************************************************** + +# Names of the SSL policies to be used for the respective components. 
+ +# The legacy policy is a special policy which is not defined in +# the policy configuration section, but rather derives from +# dbms.directories.certificates and associated files +# (by default: neo4j.key and neo4j.cert). Its use will be deprecated. + +# The policies to be used for connectors. +# +# N.B: Note that a connector must be configured to support/require +# SSL/TLS for the policy to actually be utilized. +# +# see: dbms.connector.*.tls_level + +#bolt.ssl_policy=legacy +#https.ssl_policy=legacy + +# For a causal cluster the configuring of a policy mandates its use. + +#causal_clustering.ssl_policy= + +#***************************************************************** +# SSL policy configuration +#***************************************************************** + +# Each policy is configured under a separate namespace, e.g. +# dbms.ssl.policy..* +# +# The example settings below are for a new policy named 'default'. + +# The base directory for cryptographic objects. Each policy will by +# default look for its associated objects (keys, certificates, ...) +# under the base directory. +# +# Every such setting can be overridden using a full path to +# the respective object, but every policy will by default look +# for cryptographic objects in its base location. +# +# Mandatory setting + +#dbms.ssl.policy.default.base_directory=certificates/default + +# Allows the generation of a fresh private key and a self-signed +# certificate if none are found in the expected locations. It is +# recommended to turn this off again after keys have been generated. +# +# Keys should in general be generated and distributed offline +# by a trusted certificate authority (CA) and not by utilizing +# this mode. + +#dbms.ssl.policy.default.allow_key_generation=false + +# Enabling this makes it so that this policy ignores the contents +# of the trusted_dir and simply resorts to trusting everything. +# +# Use of this mode is discouraged. It would offer encryption but no security. 
+ +#dbms.ssl.policy.default.trust_all=false + +# The private key for the default SSL policy. By default a file +# named private.key is expected under the base directory of the policy. +# It is mandatory that a key can be found or generated. + +#dbms.ssl.policy.default.private_key= + +# The private key for the default SSL policy. By default a file +# named public.crt is expected under the base directory of the policy. +# It is mandatory that a certificate can be found or generated. + +#dbms.ssl.policy.default.public_certificate= + +# The certificates of trusted parties. By default a directory named +# 'trusted' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). +# +# To enforce client authentication client_auth must be set to 'require'! + +#dbms.ssl.policy.default.trusted_dir= + +# Certificate Revocation Lists (CRLs). By default a directory named +# 'revoked' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). + +#dbms.ssl.policy.default.revoked_dir= + +# Client authentication setting. Values: none, optional, require +# The default is to require client authentication. +# +# Servers are always authenticated unless explicitly overridden +# using the trust_all setting. In a mutual authentication setup this +# should be kept at the default of require and trusted certificates +# must be installed in the trusted_dir. + +#dbms.ssl.policy.default.client_auth=require + +# It is possible to verify the hostname that the client uses +# to connect to the remote server. In order for this to work, the server public +# certificate must have a valid CN and/or matching Subject Alternative Names. + +# Note that this is irrelevant on host side connections (sockets receiving +# connections). 
+ +# To enable hostname verification client side on nodes, set this to true. + +#dbms.ssl.policy.default.verify_hostname=false + +# A comma-separated list of allowed TLS versions. +# By default only TLSv1.2 is allowed. + +#dbms.ssl.policy.default.tls_versions= + +# A comma-separated list of allowed ciphers. +# The default ciphers are the defaults of the JVM platform. + +#dbms.ssl.policy.default.ciphers= + +#***************************************************************** +# Logging configuration +#***************************************************************** + +# To enable HTTP logging, uncomment this line +#dbms.logs.http.enabled=true + +# Number of HTTP logs to keep. +#dbms.logs.http.rotation.keep_number=5 + +# Size of each HTTP log that is kept. +#dbms.logs.http.rotation.size=20m + +# To enable GC Logging, uncomment this line +#dbms.logs.gc.enabled=true + +# GC Logging Options +# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information. +#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution + +# For Java 9 and newer GC Logging Options +# see https://docs.oracle.com/javase/10/tools/java.htm#JSWOR-GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5 +#dbms.logs.gc.options=-Xlog:gc*,safepoint,age*=trace + +# Number of GC logs to keep. +#dbms.logs.gc.rotation.keep_number=5 + +# Size of each GC log that is kept. +#dbms.logs.gc.rotation.size=20m + +# Log level for the debug log. One of DEBUG, INFO, WARN and ERROR. Be aware that logging at DEBUG level can be very verbose. +#dbms.logs.debug.level=INFO + +# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k", +# "m" or "g". +#dbms.logs.debug.rotation.size=20m + +# Maximum number of history files for the internal log. 
+#dbms.logs.debug.rotation.keep_number=7 + +# Log executed queries that takes longer than the configured threshold. Enable by uncommenting this line. +dbms.logs.query.enabled=true + +# If the execution of query takes more time than this threshold, the query is logged. If set to zero then all queries +# are logged. +dbms.logs.query.threshold=0 + +# The file size in bytes at which the query log will auto-rotate. If set to zero then no rotation will occur. Accepts a +# binary suffix "k", "m" or "g". +dbms.logs.query.rotation.size=20m + +# Maximum number of history files for the query log. +dbms.logs.query.rotation.keep_number=7 + +# Include parameters for the executed queries being logged (this is enabled by default). +dbms.logs.query.parameter_logging_enabled=true + +# Uncomment this line to include detailed time information for the executed queries being logged: +dbms.logs.query.time_logging_enabled=true + +# Uncomment this line to include bytes allocated by the executed queries being logged: +#dbms.logs.query.allocation_logging_enabled=true + +# Uncomment this line to include page hits and page faults information for the executed queries being logged: +dbms.logs.query.page_logging_enabled=true + +# The security log is always enabled when `dbms.security.auth_enabled=true`, and resides in `logs/security.log`. + +# Log level for the security log. One of DEBUG, INFO, WARN and ERROR. +dbms.logs.security.level=INFO + +# Threshold for rotation of the security log. +#dbms.logs.security.rotation.size=20m + +# Minimum time interval after last rotation of the security log before it may be rotated again. +#dbms.logs.security.rotation.delay=300s + +# Maximum number of history files for the security log. 
+#dbms.logs.security.rotation.keep_number=7 + +#***************************************************************** +# Causal Clustering Configuration +#***************************************************************** + +# Uncomment and specify these lines for running Neo4j in Causal Clustering mode. +# See the Causal Clustering documentation at https://neo4j.com/docs/ for details. + +# Database mode +# Allowed values: +# CORE - Core member of the cluster, part of the consensus quorum. +# READ_REPLICA - Read replica in the cluster, an eventually-consistent read-only instance of the database. +# To operate this Neo4j instance in Causal Clustering mode as a core member, uncomment this line: +#dbms.mode=CORE + +# Expected number of Core servers in the cluster at formation +#causal_clustering.minimum_core_cluster_size_at_formation=3 + +# Minimum expected number of Core servers in the cluster at runtime. +#causal_clustering.minimum_core_cluster_size_at_runtime=3 + +# A comma-separated list of the address and port for which to reach all other members of the cluster. It must be in the +# host:port format. For each machine in the cluster, the address will usually be the public ip address of that machine. +# The port will be the value used in the setting "causal_clustering.discovery_listen_address". +#causal_clustering.initial_discovery_members=localhost:5000,localhost:5001,localhost:5002 + +# Host and port to bind the cluster member discovery management communication. +# This is the setting to add to the collection of address in causal_clustering.initial_core_cluster_members. +# Use 0.0.0.0 to bind to any network interface on the machine. If you want to only use a specific interface +# (such as a private ip address on AWS, for example) then use that ip address instead. +# If you don't know what value to use here, use this machines ip address. +#causal_clustering.discovery_listen_address=:5000 + +# Network interface and port for the transaction shipping server to listen on. 
+# Please note that it is also possible to run the backup client against this port so always limit access to it via the
+# firewall and configure an ssl policy. If you want to allow for messages to be read from
+# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address
+# (such as a private ip on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.transaction_listen_address=:6000
+
+# Network interface and port for the RAFT server to listen on. If you want to allow for messages to be read from
+# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address
+# (such as a private ip on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.raft_listen_address=:7000
+
+# List a set of names for groups to which this server should belong. This
+# is a comma-separated list and names should only use alphanumericals
+# and underscore. This can be used to identify groups of servers in the
+# configuration for load balancing and replication policies.
+#
+# The main intention for this is to group servers, but it is possible to specify
+# a unique identifier here as well which might be useful for troubleshooting
+# or other special purposes.
+#causal_clustering.server_groups=
+
+#*****************************************************************
+# Causal Clustering Load Balancing
+#*****************************************************************
+
+# N.B: Read the online documentation for a thorough explanation!
+
+# Selects the load balancing plugin that shall be enabled.
+#causal_clustering.load_balancing.plugin=server_policies + +####### Examples for "server_policies" plugin ####### + +# Will select all available servers as the default policy, which is the +# policy used when the client does not specify a policy preference. The +# default configuration for the default policy is all(). +#causal_clustering.load_balancing.config.server_policies.default=all() + +# Will select servers in groups 'group1' or 'group2' under the default policy. +#causal_clustering.load_balancing.config.server_policies.default=groups(group1,group2) + +# Slightly more advanced example: +# Will select servers in 'group1', 'group2' or 'group3', but only if there are at least 2. +# This policy will be exposed under the name of 'mypolicy'. +#causal_clustering.load_balancing.config.server_policies.mypolicy=groups(group1,group2,group3) -> min(2) + +# Below will create an even more advanced policy named 'regionA' consisting of several rules +# yielding the following behaviour: +# +# select servers in regionA, if at least 2 are available +# otherwise: select servers in regionA and regionB, if at least 2 are available +# otherwise: select all servers +# +# The intention is to create a policy for a particular region which prefers +# a certain set of local servers, but which will fallback to other regions +# or all available servers as required. +# +# N.B: The following configuration uses the line-continuation character \ +# which allows you to construct an easily readable rule set spanning +# several lines. +# +#causal_clustering.load_balancing.config.server_policies.policyA=\ +#groups(regionA) -> min(2);\ +#groups(regionA,regionB) -> min(2); + +# Note that implicitly the last fallback is to always consider all() servers, +# but this can be prevented by specifying a halt() as the last rule. 
+# +#causal_clustering.load_balancing.config.server_policies.regionA_only=\ +#groups(regionA);\ +#halt(); + +#***************************************************************** +# Causal Clustering Additional Configuration Options +#***************************************************************** +# The following settings are used less frequently. +# If you don't know what these are, you don't need to change these from their default values. + +# The name of the database being hosted by this server instance. This +# configuration setting may be safely ignored unless deploying a multicluster. +# Instances may be allocated to constituent clusters by assigning them +# distinct database names using this setting. For instance if you had 6 +# instances you could form 2 clusters by assigning half the database name +# "foo", half the name "bar". The setting value must match exactly between +# members of the same cluster. This setting is a one-off: once an instance +# is configured with a database name it may not be changed in future without +# using `neo4j-admin unbind`. +#causal_clustering.database=default + +# Address and port that this machine advertises that it's RAFT server is listening at. Should be a +# specific network address. If you are unsure about what value to use here, use this machine's ip address. +#causal_clustering.raft_advertised_address=:7000 + +# Address and port that this machine advertises that it's transaction shipping server is listening at. Should be a +# specific network address. If you are unsure about what value to use here, use this machine's ip address. +#causal_clustering.transaction_advertised_address=:6000 + +# The time limit within which a new leader election will occur if no messages from the current leader are received. +# Larger values allow for more stable leaders at the expense of longer unavailability times in case of leader +# failures. 
+#causal_clustering.leader_election_timeout=7s
+
+# The time limit allowed for a new member to attempt to update its data to match the rest of the cluster.
+#causal_clustering.join_catch_up_timeout=10m
+
+# The size of the batch for streaming entries to other machines while trying to catch up another machine.
+#causal_clustering.catchup_batch_size=64
+
+# When to pause sending entries to other machines and allow them to catch up.
+#causal_clustering.log_shipping_max_lag=256
+
+# Raft log pruning frequency.
+#causal_clustering.raft_log_pruning_frequency=10m
+
+# The size to allow the raft log to grow before rotating.
+#causal_clustering.raft_log_rotation_size=250M
+
+### The following setting is relevant for Edge servers only.
+# The interval of pulling updates from Core servers.
+#causal_clustering.pull_interval=1s
+
+# For how long should drivers cache the discovery data from
+# the dbms.cluster.routing.getServers() procedure. Defaults to 300s.
+#causal_clustering.cluster_routing_ttl=300s
+
+#*****************************************************************
+# HA configuration
+#*****************************************************************
+
+# Uncomment and specify these lines for running Neo4j in High Availability mode.
+# See the High Availability documentation at https://neo4j.com/docs/ for details.
+
+# Database mode
+# Allowed values:
+# HA - High Availability
+# SINGLE - Single mode, default.
+# To run in High Availability mode uncomment this line:
+#dbms.mode=HA
+
+# ha.server_id is the number of each instance in the HA cluster. It should be
+# an integer (e.g. 1), and should be unique for each cluster instance.
+#ha.server_id=
+
+# ha.initial_hosts is a comma-separated list (without spaces) of the host:port
+# where the ha.host.coordination of all instances will be listening. Typically
+# this will be the same for all cluster instances.
+#ha.initial_hosts=127.0.0.1:5001,127.0.0.1:5002,127.0.0.1:5003 + +# IP and port for this instance to listen on, for communicating cluster status +# information with other instances (also see ha.initial_hosts). The IP +# must be the configured IP address for one of the local interfaces. +#ha.host.coordination=127.0.0.1:5001 + +# IP and port for this instance to listen on, for communicating transaction +# data with other instances (also see ha.initial_hosts). The IP +# must be the configured IP address for one of the local interfaces. +#ha.host.data=127.0.0.1:6001 + +# The interval, in seconds, at which slaves will pull updates from the master. You must comment out +# the option to disable periodic pulling of updates. +#ha.pull_interval=10 + +# Amount of slaves the master will try to push a transaction to upon commit +# (default is 1). The master will optimistically continue and not fail the +# transaction even if it fails to reach the push factor. Setting this to 0 will +# increase write performance when writing through master but could potentially +# lead to branched data (or loss of transaction) if the master goes down. +#ha.tx_push_factor=1 + +# Strategy the master will use when pushing data to slaves (if the push factor +# is greater than 0). There are three options available "fixed_ascending" (default), +# "fixed_descending" or "round_robin". Fixed strategies will start by pushing to +# slaves ordered by server id (accordingly with qualifier) and are useful when +# planning for a stable fail-over based on ids. +#ha.tx_push_strategy=fixed_ascending + +# Policy for how to handle branched data. +#ha.branched_data_policy=keep_all + +# How often heartbeat messages should be sent. Defaults to ha.default_timeout. +#ha.heartbeat_interval=5s + +# How long to wait for heartbeats from other instances before marking them as suspects for failure. 
+# This value reflects considerations of network latency, expected duration of garbage collection pauses +# and other factors that can delay message sending and processing. Larger values will result in more +# stable masters but also will result in longer waits before a failover in case of master failure. +# This value should not be set to less than twice the ha.heartbeat_interval value otherwise there is a high +# risk of frequent master switches and possibly branched data occurrence. +#ha.heartbeat_timeout=40s + +# If you are using a load-balancer that doesn't support HTTP Auth, you may need to turn off authentication for the +# HA HTTP status endpoint by uncommenting the following line. +#dbms.security.ha_status_auth_enabled=false + +# Whether this instance should only participate as slave in cluster. If set to +# true, it will never be elected as master. +#ha.slave_only=false + +#******************************************************************** +# Security Configuration +#******************************************************************** + +# The authentication and authorization provider that contains both users and roles. +# This can be one of the built-in `native` or `ldap` auth providers, +# or it can be an externally provided plugin, with a custom name prefixed by `plugin`, +# i.e. `plugin-`. +#dbms.security.auth_provider=native + +# The time to live (TTL) for cached authentication and authorization info when using +# external auth providers (LDAP or plugin). Setting the TTL to 0 will +# disable auth caching. +#dbms.security.auth_cache_ttl=10m + +# The maximum capacity for authentication and authorization caches (respectively). +#dbms.security.auth_cache_max_capacity=10000 + +# Set to log successful authentication events to the security log. 
+# If this is set to `false` only failed authentication events will be logged, which +# could be useful if you find that the successful events spam the logs too much, +# and you do not require full auditing capability. +#dbms.security.log_successful_authentication=true + +#================================================ +# LDAP Auth Provider Configuration +#================================================ + +# URL of LDAP server to use for authentication and authorization. +# The format of the setting is `://:`, where hostname is the only required field. +# The supported values for protocol are `ldap` (default) and `ldaps`. +# The default port for `ldap` is 389 and for `ldaps` 636. +# For example: `ldaps://ldap.example.com:10389`. +# +# NOTE: You may want to consider using STARTTLS (`dbms.security.ldap.use_starttls`) instead of LDAPS +# for secure connections, in which case the correct protocol is `ldap`. +#dbms.security.ldap.host=localhost + +# Use secure communication with the LDAP server using opportunistic TLS. +# First an initial insecure connection will be made with the LDAP server, and then a STARTTLS command +# will be issued to negotiate an upgrade of the connection to TLS before initiating authentication. +#dbms.security.ldap.use_starttls=false + +# The LDAP referral behavior when creating a connection. This is one of `follow`, `ignore` or `throw`. +# `follow` automatically follows any referrals +# `ignore` ignores any referrals +# `throw` throws an exception, which will lead to authentication failure +#dbms.security.ldap.referral=follow + +# The timeout for establishing an LDAP connection. If a connection with the LDAP server cannot be +# established within the given time the attempt is aborted. +# A value of 0 means to use the network protocol's (i.e., TCP's) timeout value. +#dbms.security.ldap.connection_timeout=30s + +# The timeout for an LDAP read request (i.e. search). 
If the LDAP server does not respond within +# the given time the request will be aborted. A value of 0 means wait for a response indefinitely. +#dbms.security.ldap.read_timeout=30s + +#---------------------------------- +# LDAP Authentication Configuration +#---------------------------------- + +# LDAP authentication mechanism. This is one of `simple` or a SASL mechanism supported by JNDI, +# for example `DIGEST-MD5`. `simple` is basic username +# and password authentication and SASL is used for more advanced mechanisms. See RFC 2251 LDAPv3 +# documentation for more details. +#dbms.security.ldap.authentication.mechanism=simple + +# LDAP user DN template. An LDAP object is referenced by its distinguished name (DN), and a user DN is +# an LDAP fully-qualified unique user identifier. This setting is used to generate an LDAP DN that +# conforms with the LDAP directory's schema from the user principal that is submitted with the +# authentication token when logging in. +# The special token {0} is a placeholder where the user principal will be substituted into the DN string. +#dbms.security.ldap.authentication.user_dn_template=uid={0},ou=users,dc=example,dc=com + +# Determines if the result of authentication via the LDAP server should be cached or not. +# Caching is used to limit the number of LDAP requests that have to be made over the network +# for users that have already been authenticated successfully. A user can be authenticated against +# an existing cache entry (instead of via an LDAP server) as long as it is alive +# (see `dbms.security.auth_cache_ttl`). +# An important consequence of setting this to `true` is that +# Neo4j then needs to cache a hashed version of the credentials in order to perform credentials +# matching. This hashing is done using a cryptographic hash function together with a random salt. 
+# Preferably a conscious decision should be made if this method is considered acceptable by +# the security standards of the organization in which this Neo4j instance is deployed. +#dbms.security.ldap.authentication.cache_enabled=true + +#---------------------------------- +# LDAP Authorization Configuration +#---------------------------------- +# Authorization is performed by searching the directory for the groups that +# the user is a member of, and then map those groups to Neo4j roles. + +# Perform LDAP search for authorization info using a system account instead of the user's own account. +# +# If this is set to `false` (default), the search for group membership will be performed +# directly after authentication using the LDAP context bound with the user's own account. +# The mapped roles will be cached for the duration of `dbms.security.auth_cache_ttl`, +# and then expire, requiring re-authentication. To avoid frequently having to re-authenticate +# sessions you may want to set a relatively long auth cache expiration time together with this option. +# NOTE: This option will only work if the users are permitted to search for their +# own group membership attributes in the directory. +# +# If this is set to `true`, the search will be performed using a special system account user +# with read access to all the users in the directory. +# You need to specify the username and password using the settings +# `dbms.security.ldap.authorization.system_username` and +# `dbms.security.ldap.authorization.system_password` with this option. +# Note that this account only needs read access to the relevant parts of the LDAP directory +# and does not need to have access rights to Neo4j, or any other systems. +#dbms.security.ldap.authorization.use_system_account=false + +# An LDAP system account username to use for authorization searches when +# `dbms.security.ldap.authorization.use_system_account` is `true`. 
+# Note that the `dbms.security.ldap.authentication.user_dn_template` will not be applied to this username, +# so you may have to specify a full DN. +#dbms.security.ldap.authorization.system_username= + +# An LDAP system account password to use for authorization searches when +# `dbms.security.ldap.authorization.use_system_account` is `true`. +#dbms.security.ldap.authorization.system_password= + +# The name of the base object or named context to search for user objects when LDAP authorization is enabled. +# A common case is that this matches the last part of `dbms.security.ldap.authentication.user_dn_template`. +#dbms.security.ldap.authorization.user_search_base=ou=users,dc=example,dc=com + +# The LDAP search filter to search for a user principal when LDAP authorization is +# enabled. The filter should contain the placeholder token {0} which will be substituted for the +# user principal. +#dbms.security.ldap.authorization.user_search_filter=(&(objectClass=*)(uid={0})) + +# A list of attribute names on a user object that contains groups to be used for mapping to roles +# when LDAP authorization is enabled. +#dbms.security.ldap.authorization.group_membership_attributes=memberOf + +# An authorization mapping from LDAP group names to Neo4j role names. +# The map should be formatted as a semicolon separated list of key-value pairs, where the +# key is the LDAP group name and the value is a comma separated list of corresponding role names. 
+# For example: group1=role1;group2=role2;group3=role3,role4,role5 +# +# You could also use whitespaces and quotes around group names to make this mapping more readable, +# for example: dbms.security.ldap.authorization.group_to_role_mapping=\ +# "cn=Neo4j Read Only,cn=users,dc=example,dc=com" = reader; \ +# "cn=Neo4j Read-Write,cn=users,dc=example,dc=com" = publisher; \ +# "cn=Neo4j Schema Manager,cn=users,dc=example,dc=com" = architect; \ +# "cn=Neo4j Administrator,cn=users,dc=example,dc=com" = admin +#dbms.security.ldap.authorization.group_to_role_mapping= + + +#***************************************************************** +# Miscellaneous configuration +#***************************************************************** + +# Enable this to specify a parser other than the default one. +#cypher.default_language_version=3.0 + +# Determines if Cypher will allow using file URLs when loading data using +# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV` +# clauses that load data from the file system. +#dbms.security.allow_csv_import_from_file_urls=true + +# Retention policy for transaction logs needed to perform recovery and backups. +#dbms.tx_log.rotation.retention_policy=7 days + +# Limit the number of IOs the background checkpoint process will consume per second. +# This setting is advisory, is ignored in Neo4j Community Edition, and is followed to +# best effort in Enterprise Edition. +# An IO is in this case a 8 KiB (mostly sequential) write. Limiting the write IO in +# this way will leave more bandwidth in the IO subsystem to service random-read IOs, +# which is important for the response time of queries when the database cannot fit +# entirely in memory. The only drawback of this setting is that longer checkpoint times +# may lead to slightly longer recovery times in case of a database or system crash. +# A lower number means lower IO pressure, and consequently longer checkpoint times. 
+# The configuration can also be commented out to remove the limitation entirely, and +# let the checkpointer flush data as fast as the hardware will go. +# Set this to -1 to disable the IOPS limit. +# dbms.checkpoint.iops.limit=300 + +# Only allow read operations from this Neo4j instance. This mode still requires +# write access to the directory for lock purposes. +#dbms.read_only=false + +# Comma separated list of JAX-RS packages containing JAX-RS resources, one +# package name for each mountpoint. The listed package names will be loaded +# under the mountpoints specified. Uncomment this line to mount the +# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from +# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of +# http://localhost:7474/examples/unmanaged/helloworld/{nodeId} +#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged + +# A comma separated list of procedures and user defined functions that are allowed +# full access to the database through unsupported/insecure internal APIs. +#dbms.security.procedures.unrestricted=my.extensions.example,my.procedures.* + +# A comma separated list of procedures to be loaded by default. +# Leaving this unconfigured will load all procedures found. +#dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.* + +# Specified comma separated list of id types (like node or relationship) that should be reused. +# When some type is specified database will try to reuse corresponding ids as soon as it will be safe to do so. +# Currently only 'node' and 'relationship' types are supported. +# This settings is ignored in Neo4j Community Edition. +#dbms.ids.reuse.types.override=node,relationship + +#******************************************************************** +# JVM Parameters +#******************************************************************** + +# G1GC generally strikes a good balance between throughput and tail +# latency, without too much tuning. 
+dbms.jvm.additional=-XX:+UseG1GC + +# Have common exceptions keep producing stack traces, so they can be +# debugged regardless of how often logs are rotated. +dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow + +# Make sure that `initmemory` is not only allocated, but committed to +# the process, before starting the database. This reduces memory +# fragmentation, increasing the effectiveness of transparent huge +# pages. It also reduces the possibility of seeing performance drop +# due to heap-growing GC events, where a decrease in available page +# cache leads to an increase in mean IO response time. +# Try reducing the heap memory, if this flag degrades performance. +dbms.jvm.additional=-XX:+AlwaysPreTouch + +# Trust that non-static final fields are really final. +# This allows more optimizations and improves overall performance. +# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or +# serialization to change the value of final fields! +dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions +dbms.jvm.additional=-XX:+TrustFinalNonStaticFields + +# Disable explicit garbage collection, which is occasionally invoked by the JDK itself. +dbms.jvm.additional=-XX:+DisableExplicitGC + +# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and +# jmx.password files are required. +# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords, +# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'. +# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html +# On Unix based systems the jmx.password file needs to be owned by the user that will run the server, +# and have permissions set to 0600. 
+# For details on setting these file permissions on Windows see: +# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637 +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access + +# Some systems cannot discover host name automatically, and need this line configured: +#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME + +# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes. +# This is to protect the server from any potential passive eavesdropping. +dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048 + +# This mitigates a DDoS vector. +dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. +# Please uninstall the service before modifying this section. The +# service can then be reinstalled. 
+ +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=rpm +dbms.jvm.additional=-javaagent:/var/lib/neo4j/newrelic/newrelic.jar +metrics.enabled=true +metrics.neo4j.enabled=true +metrics.neo4j.tx.enabled=true +metrics.neo4j.pagecache.enabled=true +metrics.neo4j.counts.enabled=true +metrics.neo4j.network.enabled=true \ No newline at end of file diff --git a/ansible/roles/neo4j/templates/newrelic-neo4j.js.j2 b/ansible/roles/neo4j/templates/newrelic-neo4j.js.j2 index a2f11bfbc..f6b13f1cc 100644 --- a/ansible/roles/neo4j/templates/newrelic-neo4j.js.j2 +++ b/ansible/roles/neo4j/templates/newrelic-neo4j.js.j2 @@ -2,7 +2,7 @@ module.exports = { pid: '/var/run/newrelic-neo4j.pid', log: '/var/log/newrelic-neo4j.log', - license: "{{newrelic_key}}", + license: "{{newrelic_license_key}}", name: "neo4j", url: "http://localhost:7474", auth: { diff --git a/ansible/roles/neo4j/vars/main.yml b/ansible/roles/neo4j/vars/main.yml index b3f81ea69..76d6bdb95 100644 --- a/ansible/roles/neo4j/vars/main.yml +++ b/ansible/roles/neo4j/vars/main.yml @@ -1,4 +1,9 @@ --- # vars file for neo4j -neo4j_home: /var/lib/neo4j -graphql_version: 3.5.0.4 \ No newline at end of file +collector_name: "{{ env }}-neo4j" +additional_logs: + - name: "{{ env }} Neo4j Logs" + description: "{{ env }} neo4j logs" + category: "{{env }}/db/neo4j" + path: "/var/log/neo4j/*.log" + filters: "" \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/.travis.yml b/ansible/roles/newrelic-apm-integrations/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible 
+addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/README.md b/ansible/roles/newrelic-apm-integrations/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/newrelic-apm-integrations/defaults/main.yml b/ansible/roles/newrelic-apm-integrations/defaults/main.yml new file mode 100644 index 000000000..9bf3b41f4 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for newrelic-apm-config \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/handlers/main.yml b/ansible/roles/newrelic-apm-integrations/handlers/main.yml new file mode 100644 index 000000000..2aed11562 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for newrelic-apm-config \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/meta/main.yml b/ansible/roles/newrelic-apm-integrations/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container 
version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/tasks/main.yml b/ansible/roles/newrelic-apm-integrations/tasks/main.yml new file mode 100644 index 000000000..43c096837 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/tasks/main.yml @@ -0,0 +1,82 @@ +--- +# tasks file for newrelic-apm-config +- name: add newrelic-infra gpg key + rpm_key: + state: present + key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg + +- name: setup newrelic repo + yum_repository: + name: newrelic-infra + description: Newrelic infrastruture repository + baseurl: https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/ + + +- name: show groups + debug: + msg: "{{groups[tier]}}" + + +# - name: install newrelic apm packages +# package: +# name: +# - nri-ngix +# state: installed +# when: host is webserver + +# - name: set url fact +# set_fact: +# app_url: "{% if tier == 'prod' %}caninecommons.cancer.gov{% else %}caninecommons-{{ tier 
}}.cancer.gov{% endif %}" + + +# - name: create nginx-config.yml file +# copy: +# dest: /etc/newrelic-infra/integrations.d/nginx-config.yml +# content: | +# instances: +# - name: nginx-server-metrics +# command: metrics +# arguments: +# status_url: 'http://{{app_url}}/nginx_status' +# status_module: discover +# remote_monitoring: true +# labels: +# env: '{{tier}}' +# role: frontend +# when: host is webserver + +# - name: install newrelic apm packages +# package: +# name: +# - nri-redis +# state: installed +# when: host is neo4j + +# - name: create redis-config.yml +# copy: +# dest: /etc/newrelic-infra/integrations.d/redis-config.yml +# content: | +# instances: +# - name: redis-metrics +# command: metrics +# arguments: +# hostname: {{ansible_fqdn}} +# port: 6379 +# keys: '{"0":[""],"1":[""]}' +# remote_monitoring: true +# use_unix_socket: true +# labels: +# environment: "{{tier}}" + +# - name: redis-inventory +# command: inventory +# arguments: +# hostname: {{ansible_fqdn}} +# port: 6379 +# remote_monitoring: true +# use_unix_socket: true +# labels: +# environment: {{tier}} +# when: host is neo4j + + diff --git a/ansible/roles/newrelic-apm-integrations/templates/nginx-config.yml b/ansible/roles/newrelic-apm-integrations/templates/nginx-config.yml new file mode 100644 index 000000000..682183855 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/templates/nginx-config.yml @@ -0,0 +1,10 @@ +instances: + - name: nginx-server-metrics + command: metrics + arguments: + status_url: 'http://{{url}}/nginx_status' + status_module: discover + remote_monitoring: true + labels: + env: '{{tier}}' + role: frontend \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/tests/inventory b/ansible/roles/newrelic-apm-integrations/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git 
a/ansible/roles/newrelic-apm-integrations/tests/test.yml b/ansible/roles/newrelic-apm-integrations/tests/test.yml new file mode 100644 index 000000000..2de3cd318 --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - newrelic-apm-config \ No newline at end of file diff --git a/ansible/roles/newrelic-apm-integrations/vars/main.yml b/ansible/roles/newrelic-apm-integrations/vars/main.yml new file mode 100644 index 000000000..b23ede45a --- /dev/null +++ b/ansible/roles/newrelic-apm-integrations/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for newrelic-apm-config \ No newline at end of file diff --git a/ansible/roles/newrelic-icdc/README.md b/ansible/roles/newrelic-icdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/newrelic-icdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/newrelic-icdc/defaults/main.yml b/ansible/roles/newrelic-icdc/defaults/main.yml new file mode 100644 index 000000000..4e801fb8e --- /dev/null +++ b/ansible/roles/newrelic-icdc/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for roles/newrelic diff --git a/ansible/roles/newrelic-icdc/handlers/main.yml b/ansible/roles/newrelic-icdc/handlers/main.yml new file mode 100644 index 000000000..4223f4e46 --- /dev/null +++ b/ansible/roles/newrelic-icdc/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for roles/newrelic +- name: restart newrelic-infra + service: + name: newrelic-infra + state: restarted \ No newline at end of file diff --git a/ansible/roles/newrelic-icdc/meta/main.yml b/ansible/roles/newrelic-icdc/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/newrelic-icdc/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. 
During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/newrelic-icdc/tasks/main.yml b/ansible/roles/newrelic-icdc/tasks/main.yml new file mode 100644 index 000000000..e3e47e33c --- /dev/null +++ b/ansible/roles/newrelic-icdc/tasks/main.yml @@ -0,0 +1,59 @@ +--- +- name: add newrelic-infra gpg key + rpm_key: + state: present + key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg + +- name: setup newrelic repo + yum_repository: + name: newrelic-infra + description: Newrelic infrastruture repository + baseurl: https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/ + +- name: install newrelic-infra + package: + name: + - libcap + - newrelic-infra + state: latest + environment: + NRIA_MODE: PRIVILEGED + +- name: copy newrelic config file to /etc/ + template: + src: newrelic-infra.yml.j2 + dest: /etc/newrelic-infra.yml + owner: nri-agent + group: nri-agent + +- name: install nri-nginx + package: + name: + - nri-nginx + state: installed + when: app_type == "app" + +- name: copy nginx config file to /etc/newrelic-infra/integrations.d/ + template: + src: nginx-config.yml.j2 + dest: /etc/newrelic-infra/integrations.d/nginx-config.yml + owner: nri-agent + group: nri-agent + when: app_type == "app" + +- name: add nri-agent to docker group + user: + name: nri-agent + groups: docker + append: yes + +- name: enable and restart newrelic-infra service + service: + name: newrelic-infra + state: restarted + enabled: yes + + + + # https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/newrelic-infra.repo + diff --git a/ansible/roles/newrelic-icdc/templates/newrelic-infra.yml.j2 b/ansible/roles/newrelic-icdc/templates/newrelic-infra.yml.j2 new file mode 100644 index 000000000..87d308f59 --- /dev/null +++ b/ansible/roles/newrelic-icdc/templates/newrelic-infra.yml.j2 @@ -0,0 +1,9 @@ +license_key: {{ newrelic_license_key }} +log_file: /var/log/newrelic-infra/newrelic-infra.log +display_name: {{ collector_name }} 
+collector_url: {{ newrelic_collector_url }} +identity_url: {{ newrelic_identity_url }} +command_channel_url: {{ newrelic_command_channel_url }} +custom_attributes: + label.Project: {{ project }} + label.Environment: {{ env }} \ No newline at end of file diff --git a/ansible/roles/newrelic-icdc/templates/nginx-config.yml.j2 b/ansible/roles/newrelic-icdc/templates/nginx-config.yml.j2 new file mode 100644 index 000000000..8f1134264 --- /dev/null +++ b/ansible/roles/newrelic-icdc/templates/nginx-config.yml.j2 @@ -0,0 +1,12 @@ +integration_name: com.newrelic.nginx + +instances: + - name: nginx-server-metrics + command: metrics + arguments: + status_url: {{ nginx_url }} + status_module: discover + remote_monitoring: true + labels: + Project: {{ project }} + Environment: {{ env }} \ No newline at end of file diff --git a/ansible/roles/newrelic-icdc/tests/inventory b/ansible/roles/newrelic-icdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/newrelic-icdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/newrelic-icdc/tests/test.yml b/ansible/roles/newrelic-icdc/tests/test.yml new file mode 100644 index 000000000..b9b489f3f --- /dev/null +++ b/ansible/roles/newrelic-icdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - roles/newrelic \ No newline at end of file diff --git a/ansible/roles/newrelic-icdc/vars/main.yml b/ansible/roles/newrelic-icdc/vars/main.yml new file mode 100644 index 000000000..acb59198f --- /dev/null +++ b/ansible/roles/newrelic-icdc/vars/main.yml @@ -0,0 +1,10 @@ +--- +# vars file for roles/newrelic +hostname: "{{ inventory_hostname }}" +collector_name: "{{ project }}-{{ env }}-{{ inventory_hostname }}" +newrelic_license_key: "{{ newrelic_license_key }}" +newrelic_collector_url: "https://gov-infra-api.newrelic.com" +newrelic_identity_url: "https://gov-identity-api.newrelic.com" +newrelic_command_channel_url: 
"https://gov-infrastructure-command-api.newrelic.com" +nginx_lower_tier: "https://caninecommons-{{ env }}.cancer.gov/nginx_status" +nginx_url: "{{ 'https://caninecommons.cancer.gov/nginx_status' if env == 'prod' else nginx_lower_tier }}" \ No newline at end of file diff --git a/ansible/roles/newrelic/tasks/main.yml b/ansible/roles/newrelic/tasks/main.yml index 6043d8535..7d8cd20af 100644 --- a/ansible/roles/newrelic/tasks/main.yml +++ b/ansible/roles/newrelic/tasks/main.yml @@ -1,13 +1,8 @@ --- -# tasks file for roles/newrelic -- name: set hostname - hostname: - name: "{{ hostname }}" - -- name: add newrelic-infra gpg key - rpm_key: - state: present - key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg +#- name: add newrelic-infra gpg key +# rpm_key: +# state: present +# key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg - name: setup newrelic repo yum_repository: @@ -15,17 +10,13 @@ description: Newrelic infrastruture repository baseurl: https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/ -# - name: update yum cache -# command: yum makecache -# args: -# warn: no - - name: install newrelic-infra package: name: - libcap - newrelic-infra state: latest + disable_gpg_check: yes environment: NRIA_MODE: PRIVILEGED @@ -34,10 +25,16 @@ src: newrelic-infra.yml.j2 dest: /etc/newrelic-infra.yml -- name: enable and start newrelic-infra service +#- name: add nri-agent to docker group +# user: +# name: nri-agent +# groups: docker +# append: yes + +- name: enable and restart newrelic-infra service service: name: newrelic-infra - state: started + state: restarted enabled: yes diff --git a/ansible/roles/newrelic/templates/newrelic-infra.yml.j2 b/ansible/roles/newrelic/templates/newrelic-infra.yml.j2 index a836da0c3..b3520e997 100644 --- a/ansible/roles/newrelic/templates/newrelic-infra.yml.j2 +++ b/ansible/roles/newrelic/templates/newrelic-infra.yml.j2 @@ -1 +1,7 @@ license_key: {{ newrelic_license_key }} 
+log_file: /var/log/newrelic-infra/newrelic-infra.log +display_name: {{collector_name}} +collector_url: {{newrelic_collector_url}} +identity_url: {{newrelic_identity_url}} +command_channel_url: {{newrelic_command_channel_url}} +fedramp: true \ No newline at end of file diff --git a/ansible/roles/newrelic/vars/main.yml b/ansible/roles/newrelic/vars/main.yml index 068cf5b0a..fdfdcdd67 100644 --- a/ansible/roles/newrelic/vars/main.yml +++ b/ansible/roles/newrelic/vars/main.yml @@ -1,4 +1,10 @@ --- # vars file for roles/newrelic -newrelic_license_key: "{{ newrelic_license_key }}" -hostname: "{{ hostname }}" \ No newline at end of file +newrelic_license_key: "{{ newrelic_key }}" +hostname: "{{ hostname }}" +platform: aws +collector_name: "{{ project }}-{{ platform }}-{{ env }}-{{ app_name }}" +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +newrelic_collector_url: "https://gov-infra-api.newrelic.com" +newrelic_identity_url: "https://gov-identity-api.newrelic.com" +newrelic_command_channel_url: "https://gov-infrastructure-command-api.newrelic.com" \ No newline at end of file diff --git a/ansible/roles/old-docker/README.md b/ansible/roles/old-docker/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/old-docker/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. 
hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/old-docker/defaults/main.yml b/ansible/roles/old-docker/defaults/main.yml new file mode 100644 index 000000000..c45677333 --- /dev/null +++ b/ansible/roles/old-docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for docker \ No newline at end of file diff --git a/ansible/roles/old-docker/handlers/main.yml b/ansible/roles/old-docker/handlers/main.yml new file mode 100644 index 000000000..11684bcd3 --- /dev/null +++ b/ansible/roles/old-docker/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for docker +- name: restart docker + service: + name: docker + state: restarted diff --git a/ansible/roles/old-docker/meta/main.yml b/ansible/roles/old-docker/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/old-docker/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a 
Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/old-docker/tasks/main.yml b/ansible/roles/old-docker/tasks/main.yml new file mode 100644 index 000000000..35a25a3d8 --- /dev/null +++ b/ansible/roles/old-docker/tasks/main.yml @@ -0,0 +1,99 @@ +--- +- name: Remove other Docker versions + yum: + name: + - docker + - docker-client + - docker-client-latest + - docker-common + - docker-latest + - docker-latest-logrotate + - docker-logrotate + - docker-engine + - docker-compose + state: absent + +# tasks file for docker +- name: install systems packages needed for docker + yum: + name: + - yum-utils + # - epel-release + - device-mapper-persistent-data + - lvm2 + - python-setuptools + - firewalld + # - http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + state: latest + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + +- name: open tcp port 2375 and 2376 + firewalld: + state: enabled + permanent: yes + port: "{{item}}/tcp" + immediate: yes + zone: public + loop: + - 2375 + - 2376 + tags: + - master + +# - name: enable extra repos when running on red hat +# command: subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" +# when: ansible_distribution == 'Red Hat Enterprise Linux' + +- name: install pip and docker-compose + command: "{{ item }}" + with_items: + - "easy_install pip" + - "pip install --upgrade --force-reinstall pip==9.0.3" + - "pip install docker-compose" + +- name: add docker repo + command: > + yum-config-manager --add-repo + https://download.docker.com/linux/centos/docker-ce.repo + +- name: install docker + yum: + name: ['docker-ce', 'docker-ce-cli', 'containerd.io'] + state: latest + +- name: enable and start docker + service: + name: docker + enabled: yes + state: restarted + +- name: create docker systemd options directory + file: + path: /etc/systemd/system/docker.service.d + state: directory + tags: + - master + +- name: configure docker startup 
options + template: + src: startup-options.conf.j2 + dest: /etc/systemd/system/docker.service.d/startup_options.conf + notify: + - restart docker + tags: + - master + +- name: reload systemctl daemon + systemd: + daemon_reload: yes + + + + + + diff --git a/ansible/roles/old-docker/templates/startup-options.conf.j2 b/ansible/roles/old-docker/templates/startup-options.conf.j2 new file mode 100644 index 000000000..afa83a0aa --- /dev/null +++ b/ansible/roles/old-docker/templates/startup-options.conf.j2 @@ -0,0 +1,3 @@ +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375 \ No newline at end of file diff --git a/ansible/roles/old-docker/tests/inventory b/ansible/roles/old-docker/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/old-docker/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/old-docker/tests/test.yml b/ansible/roles/old-docker/tests/test.yml new file mode 100644 index 000000000..2c81ca427 --- /dev/null +++ b/ansible/roles/old-docker/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - docker \ No newline at end of file diff --git a/ansible/roles/old-docker/vars/main.yml b/ansible/roles/old-docker/vars/main.yml new file mode 100644 index 000000000..dc934ce9c --- /dev/null +++ b/ansible/roles/old-docker/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for docker \ No newline at end of file diff --git a/ansible/roles/open-target-backend/.travis.yml b/ansible/roles/open-target-backend/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/open-target-backend/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path 
+ - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/open-target-backend/README.md b/ansible/roles/open-target-backend/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/open-target-backend/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/open-target-backend/defaults/main.yml b/ansible/roles/open-target-backend/defaults/main.yml new file mode 100644 index 000000000..49460d9e2 --- /dev/null +++ b/ansible/roles/open-target-backend/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for roles/open-target-backend \ No newline at end of file diff --git a/ansible/roles/open-target-backend/files/newrelic.sh b/ansible/roles/open-target-backend/files/newrelic.sh new file mode 100644 index 000000000..56a062e7b --- /dev/null +++ b/ansible/roles/open-target-backend/files/newrelic.sh @@ -0,0 +1,5 @@ +curl -O https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip +unzip newrelic-java.zip +mkdir -p /usr/local/tomcat/newrelic +cp ./newrelic/newrelic.jar /usr/local/tomcat/newrelic/newrelic.jar +cp ./new-relic/newrelic.yml /usr/local/tomcat/newrelic/newrelic.yml diff --git a/ansible/roles/open-target-backend/handlers/main.yml b/ansible/roles/open-target-backend/handlers/main.yml new file mode 100644 index 000000000..08986b13a --- /dev/null +++ b/ansible/roles/open-target-backend/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for roles/open-target-backend \ No newline at end of file diff --git a/ansible/roles/open-target-backend/meta/main.yml b/ansible/roles/open-target-backend/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/open-target-backend/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + 
min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/open-target-backend/tasks/build.yml b/ansible/roles/open-target-backend/tasks/build.yml new file mode 100644 index 000000000..0bf59a63b --- /dev/null +++ b/ansible/roles/open-target-backend/tasks/build.yml @@ -0,0 +1,121 @@ +--- +# - name: scala +# yum: +# name: http://www.scala-lang.org/files/archive/scala-{{scala_version}}.rpm +# state: present + +# - name: bintray yum repository +# get_url: +# url: https://bintray.com/sbt/rpm/rpm +# dest: /etc/yum.repos.d/bintray-sbt.repo + +# - name: ensure sbt is installed +# yum: +# name: +# - sbt +# - git +# state: present + +# - name: ensure files directories exists +# file: +# path: "{{item}}" +# state: directory +# loop: +# - "{{workspace}}" +# - "{{project_home}}" + +# - name: git clone "{{project}}" +# git: +# repo: "{{project_repo}}" +# dest: "{{workspace}}/backend" +# force: yes +# version: "{{tag}}" + +# - name: build the app +# command: sbt clean compile package + +- name: build the app + command: sbt dist + args: + chdir: "{{workspace}}/ppdc-otp-backend" + +- name: extract build artifacts + unarchive: + src: "{{workspace}}/ppdc-otp-backend/target/universal/{{artifact_dir_name}}.zip" + dest: "{{workspace}}" + +- name: listing the contents after extracting + shell: ls + register: shell_result + args: + chdir: "{{workspace}}" + +- debug: + var: shell_result.stdout_lines + +- name: rename {{artifact_dir_name}} to app + command: mv "{{workspace}}/{{artifact_dir_name}}" "{{workspace}}/app" + +- name: copy config files + command: cp "{{workspace}}/ppdc-otp-backend/production.conf" "{{workspace}}/ppdc-otp-backend/production.xml" "{{workspace}}/app/conf" + +- name: delete bat script + command: rm -rf *.bat + args: + chdir: "{{workspace}}/app/bin" + +#- name: delete bat script +# find: +# paths: "{{workspace}}/app/bin" +# patterns: "*.bat" +# register: files_to_delete + +#- name: Ansible remove .bat script +# file: +# path: "{{ item.path }}" +# state: absent +# 
with_items: "{{ files_to_delete.files }}" + +- name: listing the contents + shell: ls + register: shell_result + args: + chdir: "{{workspace}}/app/bin" + +- debug: + var: shell_result.stdout_lines + +- name: rename "{{artifact_name}}" script + command: mv "{{artifact_name}}" start + args: + chdir: "{{workspace}}/app/bin" + +- debug: + var: "{{backend_version}}-{{build_number}}" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/{{stack_name}}-backend image + docker_image: + build: + path: "{{workspace}}" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/open-target-backend" + pull: yes + nocache: yes + name: cbiitssrepo/{{stack_name}}-backend + tag: "{{backend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/{{stack_name}}-backend image + docker_image: + name: "cbiitssrepo/{{stack_name}}-backend:{{backend_version}}-{{build_number}}" + repository: cbiitssrepo/{{stack_name}}-backend:latest + force_tag: yes + push: yes + source: local + diff --git a/ansible/roles/open-target-backend/tasks/common.yml b/ansible/roles/open-target-backend/tasks/common.yml new file mode 100644 index 000000000..a0e7a8494 --- /dev/null +++ b/ansible/roles/open-target-backend/tasks/common.yml @@ -0,0 +1,7 @@ +- name: install epel-release + yum: + name: + - epel-release + - yum-utils + - java-11-openjdk-devel + - python3 diff --git a/ansible/roles/open-target-backend/tasks/database.yml b/ansible/roles/open-target-backend/tasks/database.yml new file mode 100644 index 000000000..c3c0a9a2b --- /dev/null +++ b/ansible/roles/open-target-backend/tasks/database.yml @@ -0,0 +1,96 @@ +--- +# tasks file for roles/open-target-backend + +- name: add click house and elasticsearch public rpm gpg key + rpm_key: + state: present + key: "{{item}}" + loop: + - https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG + - 
https://artifacts.elastic.co/GPG-KEY-elasticsearch + +- name: add clickhouse repo + yum_repository: + name: "{{item.name}}" + description: Clickhouse repo + file: external_repos + baseurl: "{{item.repo}}" + loop: + - {repo: 'https://repo.clickhouse.tech/rpm/stable/x86_64/',name: clickhouse} + - {repo: 'https://artifacts.elastic.co/packages/7.x/yum/',name: elasticsearch} + +- name: install clickhouse and elasticsearch + yum: + name: + - clickhouse-server + - clickhouse-client + - elasticsearch-7.9.0-1.x86_64 + +- name: updating elasticsearch config files + lineinfile: + path: /etc/elasticsearch/elasticsearch.yml + line: "{{ item }}" + with_items: + - 'node.name: node-1' + - 'network.host: 0.0.0.0' + - 'http.port: 9200' + - 'cluster.initial_master_nodes: ["node-1"]' + +- name: updating clickhouse-server config files + replace: + path: /etc/clickhouse-server/config.xml + regexp: '<!-- <listen_host>::</listen_host> -->' + replace: '<listen_host>::</listen_host>' + +- name: updating elasticsearch memory configurations + lineinfile: + path: /etc/sysconfig/elasticsearch + line: "{{ item }}" + with_items: + - 'ES_HEAP_SIZE=2g' + - 'MAX_OPEN_FILES=65535' + - 'MAX_LOCKED_MEMORY=unlimited' + +- name: updating elasticsearch to increase initial memory configurations -Xms4g + replace: + path: /etc/elasticsearch/jvm.options + regexp: '-Xms1g' + replace: '-Xms4g' + +- name: updating elasticsearch to increase initial memory configurations -Xmx4g + replace: + path: /etc/elasticsearch/jvm.options + regexp: '-Xmx1g' + replace: '-Xmx4g' + + +- name: updating Dlog4j2.formatMsgNoLookups=true in elasticsearch jvm.options + lineinfile: + path: /etc/elasticsearch/jvm.options + line: "{{ item }}" + with_items: + - '-Dlog4j2.formatMsgNoLookups=true' + +- name: reload systemd config + systemd: daemon_reload=yes + +- name: enable service elasticsearch and ensure it is not masked + systemd: + name: elasticsearch + enabled: yes + masked: no + +- name: start elasticsearch + systemd: + state: started + name: elasticsearch + +- name: enable and start clickhouse
+ service: + name: clickhouse-server + enabled: yes + state: started + + + + diff --git a/ansible/roles/open-target-backend/tasks/deploy.yml b/ansible/roles/open-target-backend/tasks/deploy.yml new file mode 100644 index 000000000..4a5d27ebb --- /dev/null +++ b/ansible/roles/open-target-backend/tasks/deploy.yml @@ -0,0 +1,28 @@ +--- +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + +- name: remove frontend container + docker_container: + name: "{{stack_name}}-backend" + state: absent + +- name: clean up + command: docker system prune -a -f + +- name: launch the new ppdc-otp + docker_container: + name: "{{stack_name}}-backend" + image: cbiitssrepo/{{stack_name}}-backend:{{backend_version}}-{{build_number}} + restart_policy: always + ports: + - "8080:8080" + env: + SLICK_CLICKHOUSE_URL: jdbc:clickhouse://{{slick_clickhouse_url}}:8123 + ELASTICSEARCH_HOST: "{{elasticsearch_host}}" + PLAY_PORT: "{{play_port}}" + NEW_RELIC_APP_NAME: "{{newrelic_appname}}" + NEW_RELIC_LICENSE_KEY: "{{newrelic_license}}" + NEW_RELIC_HOST: "{{newrelic_host}}" \ No newline at end of file diff --git a/ansible/roles/open-target-backend/tasks/main.yml b/ansible/roles/open-target-backend/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/roles/open-target-backend/tests/inventory b/ansible/roles/open-target-backend/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/open-target-backend/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/open-target-backend/tests/test.yml b/ansible/roles/open-target-backend/tests/test.yml new file mode 100644 index 000000000..e7b5689ea --- /dev/null +++ b/ansible/roles/open-target-backend/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - roles/open-target-backend \ No newline at end of file diff --git a/ansible/roles/open-target-backend/vars/main.yml 
b/ansible/roles/open-target-backend/vars/main.yml new file mode 100644 index 000000000..727805c89 --- /dev/null +++ b/ansible/roles/open-target-backend/vars/main.yml @@ -0,0 +1,18 @@ +--- +# vars file for roles/open-target-backend +scala_version: 2.13.5 +project_repo: https://github.com/CBIIT/ppdc-otp-backend.git +workspace: "{{ lookup('env','WORKSPACE') }}" +artifact_dir_name: ot-platform-api-beta-latest +stack_name: open-target +artifact_name: ot-platform-api-beta +backend_version: "{{ lookup('env','BE_VERSION') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +elasticsearch_host: "{{ lookup('env','ELASTICSEARCH_HOST') }}" +slick_clickhouse_url: "{{ lookup('env','SLICK_CLICKHOUSE_URL') }}" +play_port: "{{lookup('env','PLAY_PORT') }}" +newrelic_appname: "{{ lookup('env','NEW_RELIC_APP_NAME') }}" +newrelic_license: "{{ lookup('env','NEW_RELIC_LICENSE_KEY') }}" +newrelic_host: "{{ lookup('env','NEW_RELIC_HOST') }}" \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/.travis.yml b/ansible/roles/ppdc-otg/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/ppdc-otg/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/README.md b/ansible/roles/ppdc-otg/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null 
+++ b/ansible/roles/ppdc-otg/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/ppdc-otg/defaults/main.yml b/ansible/roles/ppdc-otg/defaults/main.yml new file mode 100644 index 000000000..cf9baabb1 --- /dev/null +++ b/ansible/roles/ppdc-otg/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ppdc-otg \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/files/inject.template.js b/ansible/roles/ppdc-otg/files/inject.template.js new file mode 100644 index 000000000..9c7915530 --- /dev/null +++ b/ansible/roles/ppdc-otg/files/inject.template.js @@ -0,0 +1,11 @@ +window.injectedEnv = { + REACT_APP_BACKEND_GETUSERINFO_API: '${REACT_APP_BACKEND_GETUSERINFO_API}', + REACT_APP_LOGIN_URL: '${REACT_APP_LOGIN_URL}', + REACT_APP_USER_LOGOUT_URL: '${REACT_APP_USER_LOGOUT_URL}', + REACT_APP_BACKEND_API: '${REACT_APP_BACKEND_API}', + REACT_APP_ABOUT_CONTENT_URL: '${REACT_APP_ABOUT_CONTENT_URL}', + REACT_APP_BE_VERSION: '${REACT_APP_BE_VERSION}', + REACT_APP_FE_VERSION: '${REACT_APP_FE_VERSION}', + REACT_APP_APPLICATION_VERSION: '${REACT_APP_APPLICATION_VERSION}', + REACT_APP_GA_TRACKING_ID: '${REACT_APP_GA_TRACKING_ID}', +}; diff --git a/ansible/roles/ppdc-otg/files/nginx-entrypoint.sh b/ansible/roles/ppdc-otg/files/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/ansible/roles/ppdc-otg/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/handlers/main.yml b/ansible/roles/ppdc-otg/handlers/main.yml new file mode 100644 index 000000000..e36a3cae6 --- /dev/null +++ b/ansible/roles/ppdc-otg/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ppdc-otg \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/meta/main.yml b/ansible/roles/ppdc-otg/meta/main.yml new file mode 100644 
index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/ppdc-otg/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/tasks/build.yml b/ansible/roles/ppdc-otg/tasks/build.yml new file mode 100644 index 000000000..a4f51599a --- /dev/null +++ b/ansible/roles/ppdc-otg/tasks/build.yml @@ -0,0 +1,86 @@ +--- +- name: set dev environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "dev" + +- name: set qa environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_PERF_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "perf" + +- name: set prod environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_PROD_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx-ppdc.conf' + dest: '{{workspace}}/ppdc-otg-frontend/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/ppdc-otg-frontend/nginx-entrypoint.sh" + mode: 0755 + +- name: run yarn install in {{workspace}}/ppdc-otg-frontend + command: yarn install + args: + chdir: "{{workspace}}/ppdc-otg-frontend" + +- name: run yarn build in {{workspace}}/ppdc-otg-frontend + command: yarn build + args: + chdir: "{{workspace}}/ppdc-otg-frontend" + +- name: copy env to build + copy: + src: inject.template.js + dest: "{{workspace}}/ppdc-otg-frontend/build/inject.template.js" + mode: 0755 + +- name: rename build to dist to avoid .dockerignore problem with build directory + command: mv build dist + args: + chdir: "{{workspace}}/ppdc-otg-frontend" + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: build cbiitssrepo/ppdc-otg-frontend image + docker_image: + build: + path: 
"{{workspace}}/ppdc-otg-frontend" + + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-ppdc-dockerfile" + pull: yes + nocache: yes + name: cbiitssrepo/ppdc-otg-frontend + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/ppdc-otg-frontend image + docker_image: + name: "cbiitssrepo/ppdc-otg-frontend:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/ppdc-otg-frontend:latest + force_tag: yes + push: yes + source: local + + \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/tasks/deploy.yml b/ansible/roles/ppdc-otg/tasks/deploy.yml new file mode 100644 index 000000000..2136f409c --- /dev/null +++ b/ansible/roles/ppdc-otg/tasks/deploy.yml @@ -0,0 +1,56 @@ +--- +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + # registry: https://ncidockerhub.nci.nih.gov + +- name: remove frontend container + docker_container: + name: ppdc-otg + state: absent + +- name: clean up + command: docker system prune -a -f + +- name: ensure /local/content/docker exists + file: + path: /local/content/docker + state: directory + +- name: launch the new ppdc-otg + docker_container: + name: ppdc-otg + image: cbiitssrepo/ppdc-otg-frontend:{{frontend_version}}-{{build_number}} + restart_policy: always + ports: + - "80:80" + +# - name: update serivces and compose files +# template: +# src: "{{item.src}}" +# dest: "{{item.dest}}" +# loop: +# - {src: 'app.yml.j2',dest: '/local/content/docker/app.yml'} +# - {src: 'app.service.j2',dest: '/etc/systemd/system/app.service'} +# - {src: 'app.timer.j2',dest: '/etc/systemd/system/app.timer'} + +# - name: start frontend and backend containers +# docker_compose: +# project_src: /local/content/docker +# files: app.yml +# state: present + + +# - name: "wait for {{ frontend_url }} to become available" +# uri: +# url: "{{ frontend_url }}" +# follow_redirects: none +# method: GET 
+# register: _result +# until: ('status' in _result) and (_result.status == 200) +# retries: 100 +# delay: 10 + + + diff --git a/ansible/roles/ppdc-otg/tasks/main.yml b/ansible/roles/ppdc-otg/tasks/main.yml new file mode 100644 index 000000000..a25436eb4 --- /dev/null +++ b/ansible/roles/ppdc-otg/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# tasks file for ppdc-otg \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/tests/inventory b/ansible/roles/ppdc-otg/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/ppdc-otg/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/ppdc-otg/tests/test.yml b/ansible/roles/ppdc-otg/tests/test.yml new file mode 100644 index 000000000..eb0831394 --- /dev/null +++ b/ansible/roles/ppdc-otg/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ppdc-otg \ No newline at end of file diff --git a/ansible/roles/ppdc-otg/vars/main.yml b/ansible/roles/ppdc-otg/vars/main.yml new file mode 100644 index 000000000..d2abd363d --- /dev/null +++ b/ansible/roles/ppdc-otg/vars/main.yml @@ -0,0 +1,20 @@ +--- +# vars file for cicd +platform: aws +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: ppdc-otg-{{platform}}-{{tier}} +frontend_version: "{{version}}" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +neo4j_bearer: "{{ lookup('env','BEARER') }}" + +# hostname: 
"{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/.travis.yml b/ansible/roles/ppdc-otp/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/ppdc-otp/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/README.md b/ansible/roles/ppdc-otp/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/ppdc-otp/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/ppdc-otp/defaults/main.yml b/ansible/roles/ppdc-otp/defaults/main.yml new file mode 100644 index 000000000..cf9baabb1 --- /dev/null +++ b/ansible/roles/ppdc-otp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ppdc-otg \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/files/inject.template.js b/ansible/roles/ppdc-otp/files/inject.template.js new file mode 100644 index 000000000..aab563239 --- /dev/null +++ b/ansible/roles/ppdc-otp/files/inject.template.js @@ -0,0 +1,4 @@ +window.injectedEnv = { + configUrlApi: '${CONFIG_URL_API}', + configUrlApiBeta: '${CONFIG_URL_API_BETA}', +}; diff --git a/ansible/roles/ppdc-otp/files/nginx-entrypoint.sh b/ansible/roles/ppdc-otp/files/nginx-entrypoint.sh new file mode 100755 index 000000000..39699dc7d --- /dev/null +++ b/ansible/roles/ppdc-otp/files/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/config.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/handlers/main.yml b/ansible/roles/ppdc-otp/handlers/main.yml new file mode 100644 index 000000000..e36a3cae6 --- /dev/null +++ b/ansible/roles/ppdc-otp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ppdc-otg \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/meta/main.yml b/ansible/roles/ppdc-otp/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- 
/dev/null +++ b/ansible/roles/ppdc-otp/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/tasks/build.yml b/ansible/roles/ppdc-otp/tasks/build.yml new file mode 100644 index 000000000..74d8918b1 --- /dev/null +++ b/ansible/roles/ppdc-otp/tasks/build.yml @@ -0,0 +1,89 @@ +--- +- name: set dev environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_DEV_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "dev" + +- name: set qa environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_QA_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "qa" + +- name: set stage environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_PERF_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "perf" + +- name: set prod environment facts + set_fact: + neo4j_ip: "{{ lookup('env','NEO4J_PROD_IP') }}" + bearer: "{{ lookup('env','BEARER') }}" + when: tier == "prod" + +- name: copy nginx conf + copy: + remote_src: yes + src: '{{workspace}}/icdc-devops/docker/dockerfiles/nginx-ppdc.conf' + dest: '{{workspace}}/ppdc-otp-frontend/nginx.conf' + +- name: copy entrypoint.sh to workspace + copy: + src: "nginx-entrypoint.sh" + dest: "{{workspace}}/ppdc-otp-frontend/nginx-entrypoint.sh" + mode: 0755 + +- name: run yarn install in {{workspace}}/ppdc-otp-frontend + shell: yarn install + environment: + NODE_OPTIONS: --max-old-space-size=2048 + args: + chdir: "{{workspace}}/ppdc-otp-frontend" + +- name: run yarn build in {{workspace}}/ppdc-otp-frontend + shell: yarn build + args: + chdir: "{{workspace}}/ppdc-otp-frontend" + +- name: copy env to build + copy: + src: inject.template.js + dest: "{{workspace}}/ppdc-otp-frontend/build/inject.template.js" + mode: 0755 + +- name: log into DockerHub + docker_login: + username: "{{docker_user}}" + password: "{{docker_password}}" + +- name: rename build to dist to avoid .dockerignore problem with build directory + command: mv build dist + args: + chdir: "{{workspace}}/ppdc-otp-frontend" + + +- name: build 
cbiitssrepo/ppdc-otp-frontend image + docker_image: + build: + path: "{{workspace}}/ppdc-otp-frontend" + dockerfile: "{{workspace}}/icdc-devops/docker/dockerfiles/frontend-ppdc-dockerfile" + + pull: yes + nocache: yes + name: cbiitssrepo/ppdc-otp-frontend + tag: "{{frontend_version}}-{{build_number}}" + push: yes + force_source: yes + source: build + +- name: Add tag latest to cbiitssrepo/ppdc-otp-frontend image + docker_image: + name: "cbiitssrepo/ppdc-otp-frontend:{{frontend_version}}-{{build_number}}" + repository: cbiitssrepo/ppdc-otp-frontend:latest + force_tag: yes + push: yes + source: local + + \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/tasks/deploy.yml b/ansible/roles/ppdc-otp/tasks/deploy.yml new file mode 100644 index 000000000..34cfd0ce6 --- /dev/null +++ b/ansible/roles/ppdc-otp/tasks/deploy.yml @@ -0,0 +1,36 @@ +--- +- name: log into DockerHub + docker_login: + username: "{{ docker_user }}" + password: "{{ docker_password }}" + # registry: https://ncidockerhub.nci.nih.gov + +- name: remove frontend container + docker_container: + name: ppdc-otp + state: absent + +- name: clean up + command: docker system prune -a -f + +- name: ensure /local/content/docker exists + file: + path: /local/content/docker + state: directory + +- name: launch the new ppdc-otp + docker_container: + name: ppdc-otp + image: cbiitssrepo/ppdc-otp-frontend:{{frontend_version}}-{{build_number}} + env: + CONFIG_URL_API: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/api/v4/graphql{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/api/v4/graphql{% endif %}" + CONFIG_URL_API_BETA: "{% if tier == 'prod' %}https://{{stack_name}}.bento-tools.org/api/v4/graphql{% else %}https://{{stack_name}}-{{ tier }}.bento-tools.org/api/v4/graphql{% endif %}" + frontend_version : "{{ frontend_version }}" + restart_policy: always + ports: + - "80:80" + + + + + diff --git a/ansible/roles/ppdc-otp/tasks/main.yml b/ansible/roles/ppdc-otp/tasks/main.yml new file 
mode 100644 index 000000000..a25436eb4 --- /dev/null +++ b/ansible/roles/ppdc-otp/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# tasks file for ppdc-otp \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/tests/inventory b/ansible/roles/ppdc-otp/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/ppdc-otp/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/ppdc-otp/tests/test.yml b/ansible/roles/ppdc-otp/tests/test.yml new file mode 100644 index 000000000..eb0831394 --- /dev/null +++ b/ansible/roles/ppdc-otp/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ppdc-otp \ No newline at end of file diff --git a/ansible/roles/ppdc-otp/vars/main.yml b/ansible/roles/ppdc-otp/vars/main.yml new file mode 100644 index 000000000..264b25151 --- /dev/null +++ b/ansible/roles/ppdc-otp/vars/main.yml @@ -0,0 +1,22 @@ +--- +# vars file for cicd +platform: aws +stack_name: ppdc-otp +tier: "{{ lookup('env','TIER') }}" +version: "{{ lookup('env','VERSION') }}" +workspace: "{{ lookup('env','WORKSPACE') }}" +docker_user: "{{ lookup('env','DOCKER_USER') }}" +docker_password: "{{ lookup('env','DOCKER_PASSWORD') }}" +build_number: "{{ lookup('env','BUILD_NUMBER')}}" +region: us-east-1 +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +syslog_host: "{{ lookup('aws_ssm', 'syslog_host', region='us-east-1' ) }}" +app_name: ppdc-otp-{{platform}}-{{tier}} +frontend_version: "{{version}}" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +neo4j_bearer: "{{ lookup('env','BEARER') }}" +slick_clickhouse_url: "{{ lookup('env','SLICK_CLICKHOUSE_URL') }}" + +# hostname: "{{ansible_fqdn}}" \ No newline at end of file diff --git a/ansible/roles/redis-intergration/.travis.yml 
b/ansible/roles/redis-intergration/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/redis-intergration/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/redis-intergration/README.md b/ansible/roles/redis-intergration/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/redis-intergration/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/redis-intergration/defaults/main.yml b/ansible/roles/redis-intergration/defaults/main.yml new file mode 100644 index 000000000..5a6aafe8a --- /dev/null +++ b/ansible/roles/redis-intergration/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for redis-intergration \ No newline at end of file diff --git a/ansible/roles/redis-intergration/handlers/main.yml b/ansible/roles/redis-intergration/handlers/main.yml new file mode 100644 index 000000000..3e97a84c9 --- /dev/null +++ b/ansible/roles/redis-intergration/handlers/main.yml @@ -0,0 +1,15 @@ +--- +# handlers file for redis-intergration +- name: restart redis + service: + name: redis + state: restarted + enabled: yes + +- name: restart newrelic + service: + name: newrelic-infra + state: restarted + enabled: yes + + \ No newline at end of file diff --git a/ansible/roles/redis-intergration/meta/main.yml b/ansible/roles/redis-intergration/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/redis-intergration/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, 
MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/redis-intergration/tasks/intergration.yml b/ansible/roles/redis-intergration/tasks/intergration.yml new file mode 100644 index 000000000..ed89f3abf --- /dev/null +++ b/ansible/roles/redis-intergration/tasks/intergration.yml @@ -0,0 +1,14 @@ +--- +- name: install redis integration + yum: + name: + - nri-redis + +- name: copy redis config file to /etc/newrelic-infra/integrations.d/ + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + loop: + - {src: redis-config.yml.j2,dest: /etc/newrelic-infra/integrations.d/redis-config.yml } + - {src: redis-definition.yml.j2,dest: /var/db/newrelic-infra/newrelic-integrations/redis-definition.yml} + notify: restart newrelic \ No newline at end of file diff --git a/ansible/roles/redis-intergration/tasks/main.yml b/ansible/roles/redis-intergration/tasks/main.yml new file mode 100644 index 000000000..942b2c663 --- /dev/null +++ b/ansible/roles/redis-intergration/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: ensure newrelic is installed + include_tasks: newrelic.yml + +- name: install redis + include_tasks: redis.yml + +- name: install redis intergration + include_tasks: intergration.yml \ No newline at end of file diff --git a/ansible/roles/redis-intergration/tasks/newrelic.yml b/ansible/roles/redis-intergration/tasks/newrelic.yml new file mode 100644 index 000000000..736e8b3c6 --- /dev/null +++ b/ansible/roles/redis-intergration/tasks/newrelic.yml @@ -0,0 +1,31 @@ +--- +- name: add newrelic-infra gpg key + rpm_key: + state: present + key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg + +- name: setup newrelic repo + yum_repository: + name: newrelic-infra + description: Newrelic infrastruture repository + baseurl: https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/ + +- name: install newrelic-infra + package: + name: + - libcap + - newrelic-infra + state: installed + environment: + NRIA_MODE: PRIVILEGED + +- name: copy 
newrelic config file to /etc/ + template: + src: newrelic-infra.yml.j2 + dest: /etc/newrelic-infra.yml + +- name: enable and restart newrelic-infra service + service: + name: newrelic-infra + state: restarted + enabled: yes diff --git a/ansible/roles/redis-intergration/tasks/redis.yml b/ansible/roles/redis-intergration/tasks/redis.yml new file mode 100644 index 000000000..805e8c16d --- /dev/null +++ b/ansible/roles/redis-intergration/tasks/redis.yml @@ -0,0 +1,27 @@ +--- +# tasks file for redis-intergration + +- name: install redis + yum: + name: + - redis + state: latest + +- name: start and enable redis + service: + name: redis + state: started + enabled: yes + +- name: configure redis to listen on host ip + template: + src: redis.conf.j2 + dest: /etc/redis.conf + owner: root + group: root + mode: 0644 + notify: restart redis + + + + \ No newline at end of file diff --git a/ansible/roles/redis-intergration/templates/newrelic-infra.yml.j2 b/ansible/roles/redis-intergration/templates/newrelic-infra.yml.j2 new file mode 100644 index 000000000..509d11eaa --- /dev/null +++ b/ansible/roles/redis-intergration/templates/newrelic-infra.yml.j2 @@ -0,0 +1,6 @@ +license_key: {{ newrelic_license_key }} +log_file: /var/log/newrelic-infra/newrelic-infra.log +display_name: {{ collector_name }} +collector_url: {{ newrelic_collector_url }} +identity_url: {{ newrelic_identity_url }} +command_channel_url: {{ newrelic_command_channel_url }} \ No newline at end of file diff --git a/ansible/roles/redis-intergration/templates/redis-config.yml.j2 b/ansible/roles/redis-intergration/templates/redis-config.yml.j2 new file mode 100644 index 000000000..5f4d3356f --- /dev/null +++ b/ansible/roles/redis-intergration/templates/redis-config.yml.j2 @@ -0,0 +1,45 @@ +integration_name: {{project}}-{{env}}-redis + +instances: + - name: redis-metrics + command: metrics + arguments: + hostname: {{ansible_hostname}} + port: 6379 + keys: '{"0":[""],"1":[""]}' + + # New users should leave this property as 
`true`, to identify the + # monitored entities as `remote`. Setting this property to `false` (the + # default value) is deprecated and will be removed soon, disallowing + # entities that are identified as `local`. + # Please check the documentation to get more information about local + # versus remote entities: + # https://github.com/newrelic/infra-integrations-sdk/blob/master/docs/entity-definition.md + remote_monitoring: true + + # New users should leave this property as `true`, to uniquely identify the monitored entities when using + # Unix sockets. + use_unix_socket: true + labels: + environment: {{env}} + + - name: redis-inventory + command: inventory + arguments: + hostname: {{ansible_hostname}} + port: 6379 + + # New users should leave this property as `true`, to identify the + # monitored entities as `remote`. Setting this property to `false` (the + # default value) is deprecated and will be removed soon, disallowing + # entities that are identified as `local`. + # Please check the documentation to get more information about local + # versus remote entities: + # https://github.com/newrelic/infra-integrations-sdk/blob/master/docs/entity-definition.md + remote_monitoring: true + + # New users should leave this property as `true`, to uniquely identify the monitored entities when using + # Unix sockets. 
+ use_unix_socket: true + labels: + environment: {{env}} \ No newline at end of file diff --git a/ansible/roles/redis-intergration/templates/redis-definition.yml.j2 b/ansible/roles/redis-intergration/templates/redis-definition.yml.j2 new file mode 100644 index 000000000..41b894d21 --- /dev/null +++ b/ansible/roles/redis-intergration/templates/redis-definition.yml.j2 @@ -0,0 +1,18 @@ +name: {{project}}-{{env}}-redis +description: Reports status and metrics for redis service +protocol_version: 3 +os: linux + +commands: + metrics: + command: + - ./bin/nri-redis + - --metrics + interval: 15 + + inventory: + command: + - ./bin/nri-redis + - --inventory + prefix: config/redis + interval: 60 diff --git a/ansible/roles/redis-intergration/templates/redis.conf.j2 b/ansible/roles/redis-intergration/templates/redis.conf.j2 new file mode 100644 index 000000000..ad83eecdb --- /dev/null +++ b/ansible/roles/redis-intergration/templates/redis.conf.j2 @@ -0,0 +1,1052 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. 
Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 lookback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind {{ansible_host}} + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. 
+# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. 
+tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. 
Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile /var/log/redis/redis.log + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir /var/lib/redis + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. 
You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. 
+slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. 
+# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. 
+# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. 
+# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# slaves in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover slave instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a masteer. +# +# The listed IP and address normally reported by a slave is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the slave to connect with the master. +# +# Port: The port is communicated by the slave during the replication +# handshake, and is normally the port that the slave is using to +# list for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the slave may be actually reachable via different IP and port +# pairs. The following two options can be used by a slave in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# slave-announce-ip 5.5.5.5 +# slave-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). 
+# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. 
+# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. 
+# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. 
+# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. 
+# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. 
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. 
+# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. 
+slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@<db>__ prefix. +# E Keyevent events, published with __keyevent@<db>__ prefix. 
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. 
+# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. 
This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. 
+activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. 
+# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes diff --git a/ansible/roles/redis-intergration/tests/inventory b/ansible/roles/redis-intergration/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/redis-intergration/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/redis-intergration/tests/test.yml b/ansible/roles/redis-intergration/tests/test.yml new file mode 100644 index 000000000..8bbd724f5 --- /dev/null +++ b/ansible/roles/redis-intergration/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - redis-intergration \ No newline at end of file diff --git a/ansible/roles/redis-intergration/vars/main.yml b/ansible/roles/redis-intergration/vars/main.yml new file mode 100644 index 000000000..18acde946 --- /dev/null +++ b/ansible/roles/redis-intergration/vars/main.yml @@ -0,0 +1,9 @@ +--- +# vars file for redis-intergration +hostname: "{{ ansible_hostname }}" +collector_name: "{{ project }}-{{ env }}-{{ ansible_hostname }}" +newrelic_license_key: "{{ newrelic_license_key }}" +newrelic_collector_url: 
"https://gov-infra-api.newrelic.com" +newrelic_identity_url: "https://gov-identity-api.newrelic.com" +newrelic_command_channel_url: "https://gov-infrastructure-command-api.newrelic.com" + diff --git a/ansible/roles/redis/.travis.yml b/ansible/roles/redis/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/redis/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/redis/README.md b/ansible/roles/redis/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/redis/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/redis/defaults/main.yml b/ansible/roles/redis/defaults/main.yml new file mode 100644 index 000000000..c5f8a6d8f --- /dev/null +++ b/ansible/roles/redis/defaults/main.yml @@ -0,0 +1,19 @@ +--- +# defaults file for redis +redis_host: + dev: "bento-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + qa: "bento-qa-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + perf: "bento-perf-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + icdc: "bento-icdc-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + prod: "bento-prod-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + gmb-dev: "gmb-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + pcdc-dev: "pcdc-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" + gitlab: "bento-dev-redis-cluster.l5vrvc.clustercfg.use1.cache.amazonaws.com" +redis_host_icdc: + dev: "10.208.2.75" + qa: "10.208.10.169" + stage: "10.208.18.154" + prod: "10.208.26.177" + demo: "localhost" +redis_password: "" +redis_port: 6379 diff --git a/ansible/roles/redis/handlers/main.yml b/ansible/roles/redis/handlers/main.yml new file mode 100644 index 000000000..b51e39747 --- /dev/null +++ b/ansible/roles/redis/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for redis diff --git a/ansible/roles/redis/meta/main.yml 
b/ansible/roles/redis/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/redis/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/redis/tasks/main.yml b/ansible/roles/redis/tasks/main.yml new file mode 100644 index 000000000..7744df808 --- /dev/null +++ b/ansible/roles/redis/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# tasks file for redis +- name: confirm redis redis_host + debug: + msg: "{{ redis_host[tier] }}" +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL" | redis-cli -h {{ redis_host[tier] }} -p {{ redis_port }} -c diff --git a/ansible/roles/redis/tasks/redis_icdc.yml b/ansible/roles/redis/tasks/redis_icdc.yml new file mode 100644 index 000000000..03e983f79 --- /dev/null +++ b/ansible/roles/redis/tasks/redis_icdc.yml @@ -0,0 +1,8 @@ +--- +# tasks file for redis +- name: confirm redis redis_host + debug: + msg: "{{ redis_host_icdc[tier] }}" + +- name: flush redis cache + shell: echo -e "get abc \nFLUSHALL" | redis-cli -h {{ redis_host_icdc[tier] }} -p {{ redis_port }} -c \ No newline at end of file diff --git a/ansible/roles/redis/templates/flush_redis.sh.j2 b/ansible/roles/redis/templates/flush_redis.sh.j2 new file mode 100644 index 000000000..684327f26 --- /dev/null +++ b/ansible/roles/redis/templates/flush_redis.sh.j2 @@ -0,0 +1 @@ +echo -e "get abc \nping" | redis-cli -h "{{ redis_host_icdc[tier] }}" -p "{{ redis_port }}" -c \ No newline at end of file diff --git a/ansible/roles/redis/tests/inventory b/ansible/roles/redis/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/redis/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/redis/tests/test.yml b/ansible/roles/redis/tests/test.yml new file mode 100644 index 000000000..c5322791e --- /dev/null +++ b/ansible/roles/redis/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - redis \ No newline at end of file diff --git a/ansible/roles/redis/vars/main.yml b/ansible/roles/redis/vars/main.yml new file mode 100644 index 000000000..aad39bf46 --- /dev/null +++ 
b/ansible/roles/redis/vars/main.yml @@ -0,0 +1,3 @@ +--- +# vars file for redis +tier: "{{ lookup('env','TIER') }}" \ No newline at end of file diff --git a/ansible/roles/selenium/README.md b/ansible/roles/selenium/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/selenium/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/selenium/defaults/main.yml b/ansible/roles/selenium/defaults/main.yml new file mode 100644 index 000000000..b5ae22e3c --- /dev/null +++ b/ansible/roles/selenium/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for selenium-vm +driver_version: 76.0.3809.68 +maven_version: 3.6.1 \ No newline at end of file diff --git a/ansible/roles/selenium/handlers/main.yml b/ansible/roles/selenium/handlers/main.yml new file mode 100644 index 000000000..f8d3978de --- /dev/null +++ b/ansible/roles/selenium/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for selenium-vm \ No newline at end of file diff --git a/ansible/roles/selenium/meta/main.yml b/ansible/roles/selenium/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/selenium/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/selenium/tasks/main.yml b/ansible/roles/selenium/tasks/main.yml new file mode 100644 index 000000000..e350a57a1 --- /dev/null +++ b/ansible/roles/selenium/tasks/main.yml @@ -0,0 +1,112 @@ +--- +# tasks file for selenium-vm +- name: add chrome-stable repo + yum_repository: + name: chrome-stable + description: google chrome-stable + baseurl: http://dl.google.com/linux/chrome/rpm/stable/x86_64 + gpgcheck: yes + enabled: yes + gpgkey: https://dl.google.com/linux/linux_signing_key.pub + + +- name: install epel-release + yum: + name: + - epel-release + state: installed + +- name: "install gui utilities and xrdp" + yum: + name: + - "@General Purpose Desktop" + - "@Development Tools" + # - "X Window System" + - which + - unzip + - google-chrome-stable + - java-11-openjdk + - xorg-x11-server-Xvfb + - libXfont + state: latest + +# - name: install libxi6 and libgconf-2-4 +# yum: +# name: +# - libxi6 +# - libgconf-2-4 +# state: latest + +- name: download chromedriver and maven + get_url: + url: "{{item}}" + dest: /tmp + loop: + - 
https://chromedriver.storage.googleapis.com/{{driver_version}}/chromedriver_linux64.zip + - https://archive.apache.org/dist/maven/maven-3/{{maven_version}}/binaries/apache-maven-{{maven_version}}-bin.tar.gz + +- name: unzip chromedriver + unarchive: + src: /tmp/chromedriver_linux64.zip + dest: /usr/local/bin + mode: '0755' + remote_src: yes + +- name: untar maven + unarchive: + src: /tmp/apache-maven-{{maven_version}}-bin.tar.gz + dest: /usr/local + remote_src: yes + +- name: remove the downloaded files + file: + path: "{{item}}" + state: absent + loop: + - /tmp/chromedriver_linux64.zip + - /tmp/apache-maven-{{maven_version}}-bin.tar.gz + +- name: create maven symlink + file: + src: /usr/local/apache-maven-{{maven_version}} + dest: /usr/local/maven + state: link + +- name: create maven profile + file: + path: /etc/profile.d/maven.sh + state: touch + +- name: update system path + lineinfile: + dest: /etc/profile.d/maven.sh + state: present + line: export PATH=$PATH:/usr/local/maven/bin + +- name: "enable gui on bootup" + file: + dest: /etc/systemd/system/default.target + src: /lib/systemd/system/runlevel5.target + state: link + +# - name: "open rdp port 3389" +# firewalld: +# immediate: true +# permanent: true +# port: 3389/tcp +# state: enabled + +# - name: apply selinux +# command: "{{ item }}" +# loop: +# - chcon --type=bin_t /usr/sbin/xrdp +# - chcon --type=bin_t /usr/sbin/xrdp-sesman + +# - name: "start and enable xrdp" +# service: +# enabled: true +# name: xrdp +# state: started + +# - name: reboot +# reboot: diff --git a/ansible/roles/selenium/tests/inventory b/ansible/roles/selenium/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/selenium/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/selenium/tests/test.yml b/ansible/roles/selenium/tests/test.yml new file mode 100644 index 000000000..aeed11522 --- /dev/null +++ b/ansible/roles/selenium/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: 
localhost + remote_user: root + roles: + - selenium \ No newline at end of file diff --git a/ansible/roles/selenium/vars/main.yml b/ansible/roles/selenium/vars/main.yml new file mode 100644 index 000000000..a9082010f --- /dev/null +++ b/ansible/roles/selenium/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for selenium-vm \ No newline at end of file diff --git a/terraform/icdc/roles/setup-docker/handlers/main.yml b/ansible/roles/setup-docker/handlers/main.yml similarity index 100% rename from terraform/icdc/roles/setup-docker/handlers/main.yml rename to ansible/roles/setup-docker/handlers/main.yml diff --git a/terraform/icdc/roles/setup-docker/tasks/main.yml b/ansible/roles/setup-docker/tasks/main.yml similarity index 95% rename from terraform/icdc/roles/setup-docker/tasks/main.yml rename to ansible/roles/setup-docker/tasks/main.yml index d991fe585..b40005446 100644 --- a/terraform/icdc/roles/setup-docker/tasks/main.yml +++ b/ansible/roles/setup-docker/tasks/main.yml @@ -1,4 +1,5 @@ -- name: install systems packages needed for docker +# - name: install systems packages needed for docker +- name: install yum-utils yum: name: - yum-utils diff --git a/terraform/icdc/roles/setup-jenkins/handlers/main.yml b/ansible/roles/setup-jenkins/handlers/main.yml similarity index 100% rename from terraform/icdc/roles/setup-jenkins/handlers/main.yml rename to ansible/roles/setup-jenkins/handlers/main.yml diff --git a/terraform/icdc/roles/setup-jenkins/tasks/main.yml b/ansible/roles/setup-jenkins/tasks/main.yml similarity index 52% rename from terraform/icdc/roles/setup-jenkins/tasks/main.yml rename to ansible/roles/setup-jenkins/tasks/main.yml index c3c8b5d90..c777ef88f 100644 --- a/terraform/icdc/roles/setup-jenkins/tasks/main.yml +++ b/ansible/roles/setup-jenkins/tasks/main.yml @@ -26,41 +26,89 @@ # name: centos # gid: 1001 # when: '"centos" in current_users.stdout' - -- name: create jenkins group - group: - name: jenkins - gid: 1000 - state: present -- name: create jenkins 
user - user: - name: jenkins - uid: 1000 - group: jenkins +# - name: create jenkins group +# group: +# name: jenkins +# gid: 1001 +# state: present + +# - name: create jenkins user +# user: +# name: jenkins +# uid: 1001 +# group: jenkins + +- name: install openjdk11 for local slave + yum: + name: java-11-openjdk + state: latest - name: create file structure for the jenkins file: path: "{{ item }}" state: directory - group: jenkins - owner: jenkins + # group: jenkins + # owner: jenkins + group: centos + owner: centos loop: - - "{{ jenkins_home }}" + - "{{ jenkins_home }}/jk_secrets" - "{{ docker_home }}" - - "{{ secrets_home }}" - name: copy docker files template: src: "{{ item.src }}" dest: "{{ item.dest }}" - group: jenkins - owner: jenkins - with_items: + # group: jenkins + # owner: jenkins + group: centos + owner: centos + loop: - {src: 'docker-compose.yml.j2',dest: '{{ docker_home }}/docker-compose.yml'} - {src: 'dockerfile_jenkins.j2',dest: '{{docker_home}}/dockerfile_jenkins'} - {src: 'plugins.txt.j2',dest: '{{docker_home}}/plugins.txt'} + - {src: 'jenkins.env.j2',dest: '{{docker_home}}/jenkins.env'} + + +- name: copy conf files + copy: + src: "{{jenkins_yaml}}" + dest: "{{jenkins_home}}/jenkins.yaml" + owner: centos + group: centos + +- name: copy server_sshkey files + copy: + src: "{{server_sshkey_file}}" + dest: "{{jenkins_home}}/jk_secrets/server_sshkey" + owner: centos + group: centos + +- name: add secret files + copy: + content: "{{docker_agent_ip}}" + dest: "{{jenkins_home}}/docker_agent_ip" + +- name: add secrets files + copy: + content: "{{ item.src }}" + dest: "{{jenkins_home}}/jk_secrets/{{ item.dest }}" + # group: jenkins + # owner: jenkins + group: centos + owner: centos + loop: + - {src: "{{docker_agent_ip}}",dest: "docker_agent_ip"} + - {src: "{{tomcat01_ip}}",dest: "tomcat01_ip"} + - {src: "{{tomcat02_ip}}",dest: "tomcat02_ip"} + - {src: "{{slack_url}}",dest: "slack_url"} + - {src: "{{jenkinsadmin_ps}}",dest: "jenkinsadmin_ps"} + - {src: 
"{{bearer_ps}}",dest: "bearer_ps"} + - {src: "{{vdonkor_ps}}",dest: "vdonkor_ps"} + - {src: "{{neo4j_ps}}",dest: "neo4j_ps"} + - {src: "{{sshkey}}",dest: "sshkey"} - name: build the docker image docker_image: @@ -69,43 +117,6 @@ dockerfile: "{{ dockerfile }}" state: present -- name: create jenkinsAdmin secret - file: - path: "{{ secrets_home }}/{{item}}" - owner: jenkins - group: jenkins - state: touch - loop: - - jenkinsAdmin - - vdonkor - - neo4j - - bearer - - sshkey - -- name: update jenkinsAdmin with pass - lineinfile: - path: "{{ secrets_home }}/jenkinsAdmin" - line: "{{ jenkinsAdmin }}" - -- name: update vdonkor with pass - lineinfile: - path: "{{ secrets_home }}/vdonkor" - line: "{{ vdonkor }}" - -- name: update neo4j with pass - lineinfile: - path: "{{ secrets_home }}/neo4j" - line: "{{ neo4j }}" - -- name: update bearer with pass - lineinfile: - path: "{{ secrets_home }}/bearer" - line: "{{ bearer }}" - -- name: update sshkey with pass - lineinfile: - path: "{{ secrets_home }}/sshkey" - line: "{{ sshkey }}" - name: start the jenkins docker_service: @@ -121,7 +132,7 @@ # register: output_secret # - debug: -# msg: "{{ output_secret.stdout }}" +# msg: "{{ output_secret.stdout }}" # - name: set docker secret user # shell: echo {{ jenkinsAdmin }} | docker secret create jenkinsAdmin - @@ -134,5 +145,3 @@ # - debug: # msg: "{{output.stdout }}" - - diff --git a/ansible/roles/setup-jenkins/templates/docker-compose.yml.j2 b/ansible/roles/setup-jenkins/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..c11884896 --- /dev/null +++ b/ansible/roles/setup-jenkins/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + jenkins: + image: k9dc/jenkins + ports: + - 80:8080 + - 5001:5000 + volumes: + - {{jenkins_home}}:/var/jenkins_home + env_file: + - ./jenkins.env + environment: + - CASC_JENKINS_CONFIG=/var/jenkins_home/jenkins.yaml + - SECRETS=/var/jenkins_home/jk_secrets/ + restart: always diff --git 
a/ansible/roles/setup-jenkins/templates/dockerfile_jenkins.j2 b/ansible/roles/setup-jenkins/templates/dockerfile_jenkins.j2 new file mode 100644 index 000000000..c2c4a07db --- /dev/null +++ b/ansible/roles/setup-jenkins/templates/dockerfile_jenkins.j2 @@ -0,0 +1,7 @@ +FROM jenkins/jenkins:2.176.2 + +ARG JAVA_OPTS +ENV JAVA_OPTS="-Djenkins.install.runSetupWizard=false ${JAVA_OPTS:-}" + +COPY plugins.txt /usr/share/jenkins/ref/plugins.txt +RUN xargs /usr/local/bin/install-plugins.sh < /usr/share/jenkins/ref/plugins.txt diff --git a/ansible/roles/setup-jenkins/templates/jenkins.env.j2 b/ansible/roles/setup-jenkins/templates/jenkins.env.j2 new file mode 100644 index 000000000..03d5ff56e --- /dev/null +++ b/ansible/roles/setup-jenkins/templates/jenkins.env.j2 @@ -0,0 +1,10 @@ +DOCKER_AGENT_IP={{ docker_agent_ip }} +TOMCAT01_IP={{ tomcat01_ip }} +TOMCAT02_IP={{ tomcat02_ip }} +NEO4J_IP={{ neo4j_ip }} +SLACK_URL={{ slack_url }} +jenkinsAdmin={{ jenkinsadmin_ps }} +bearer={{ bearer_ps }} +vdonkor={{ vdonkor_ps }} +neo4j={{ neo4j_ps }} +sshkey="{{ sshkey }}" diff --git a/terraform/icdc/roles/setup-jenkins/templates/plugins.txt.j2 b/ansible/roles/setup-jenkins/templates/plugins.txt.j2 similarity index 100% rename from terraform/icdc/roles/setup-jenkins/templates/plugins.txt.j2 rename to ansible/roles/setup-jenkins/templates/plugins.txt.j2 diff --git a/ansible/roles/setup-nat-instance/tasks/main.yml b/ansible/roles/setup-nat-instance/tasks/main.yml new file mode 100644 index 000000000..cdd8f0d5e --- /dev/null +++ b/ansible/roles/setup-nat-instance/tasks/main.yml @@ -0,0 +1,30 @@ +- name: install firewalld package + yum: + name: firewalld + state: latest + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + +- name: configure ip masquerade + command: "{{ item}}" + loop: + - "firewall-offline-cmd --direct --add-rule ipv4 nat POSTROUTING 0 -o eth0 -j MASQUERADE" + - "firewall-offline-cmd --direct --add-rule ipv4 filter FORWARD 0 -i 
eth0 -j ACCEPT" + +- name: restart firewalld service + service: + name: firewalld + state: restarted + + +- name: Set ip forwarding on sysctl file and reload if necessary + sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_set: yes + state: present + reload: yes \ No newline at end of file diff --git a/ansible/roles/slack-notification/.travis.yml b/ansible/roles/slack-notification/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/slack-notification/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/slack-notification/README.md b/ansible/roles/slack-notification/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/slack-notification/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. 
hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/slack-notification/defaults/main.yml b/ansible/roles/slack-notification/defaults/main.yml new file mode 100644 index 000000000..7dfc1014e --- /dev/null +++ b/ansible/roles/slack-notification/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for slack-notification +title: "File Monitor URL Status" +text: "Current Status of {{item}} is good" \ No newline at end of file diff --git a/ansible/roles/slack-notification/handlers/main.yml b/ansible/roles/slack-notification/handlers/main.yml new file mode 100644 index 000000000..53bae9c35 --- /dev/null +++ b/ansible/roles/slack-notification/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for slack-notification \ No newline at end of file diff --git a/ansible/roles/slack-notification/meta/main.yml b/ansible/roles/slack-notification/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/slack-notification/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some 
suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/ansible/roles/slack-notification/tasks/main.yml b/ansible/roles/slack-notification/tasks/main.yml new file mode 100644 index 000000000..bd95957e1 --- /dev/null +++ b/ansible/roles/slack-notification/tasks/main.yml @@ -0,0 +1,15 @@ +--- +# tasks file for slack-notification + +- name: Use the attachments API + community.general.slack: + token: "{{slack_token}}" + icon_emoji: ":alert:" + attachments: + - text: "{{custom_text}}" + title: "{{custom_title}}" + color: "${BUILD_COLORS[currentBuild.currentResult]}" + footer: "bento devops" + ts: "{{ ansible_date_time.epoch }}" + mrkdwn_in: ["footer", "title"] + fallback: "Bento Jenkins Build" \ No newline at end of file diff --git a/ansible/roles/slack-notification/tests/inventory b/ansible/roles/slack-notification/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/slack-notification/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/slack-notification/tests/test.yml b/ansible/roles/slack-notification/tests/test.yml new file mode 100644 index 000000000..c31168b82 --- /dev/null +++ b/ansible/roles/slack-notification/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - slack-notification \ No newline at end of file diff --git a/ansible/roles/slack-notification/vars/main.yml b/ansible/roles/slack-notification/vars/main.yml new file mode 100644 index 000000000..a2669e1e7 --- /dev/null +++ b/ansible/roles/slack-notification/vars/main.yml @@ -0,0 +1,3 @@ +--- +# vars file for slack-notification +slack_token: "{{ lookup('env','SLACK_TOKEN') }}" \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/README.md b/ansible/roles/sumologic-icdc/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/sumologic-icdc/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. 
+ +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/README.md b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/defaults/main.yml b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/defaults/main.yml new file mode 100644 index 000000000..60b4b0aa0 --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for sumologic +timezone: 'Etc/EST' +additional_logs: [] \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/handlers/main.yml b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/handlers/main.yml new file mode 100644 index 000000000..be869c71d --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for sumologic +- name: restart collector + service: + name: collector + state: restarted \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/meta/main.yml b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. 
+ # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tasks/main.yml b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tasks/main.yml new file mode 100644 index 000000000..445d4923f --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tasks/main.yml @@ -0,0 +1,36 @@ +--- +# tasks file for sumologic + +- name: check if sumologic is installed + yum: + list: 'SumoCollector' + register: sumo_installed + +- name: download sumologic rpm + get_url: + url: https://collectors.sumologic.com/rest/download/rpm/64 + dest: /tmp/sumologic.rpm + when: sumo_installed.results == [] + +- name: Install SumoCollector + yum: + name: '/tmp/sumologic.rpm' + state: installed + when: sumo_installed.results == [] + +- name: copy user.properties and source configuration + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + group: sumologic_collector + with_items: + - {src: 'sources.json.j2',dest: '{{config}}/sources.json'} + - {src: 'user.properties.j2',dest: '{{config}}/user.properties'} + notify: restart collector + + +- name: Start service + service: + name: collector + state: started + enabled: yes diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/sources.json.j2 b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/sources.json.j2 new file mode 100644 index 000000000..4c96dd943 --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/sources.json.j2 @@ -0,0 +1,68 @@ +{ + "api.version": "v1", + "sources": [ + { + "name": "Linux Secure Log", + "description": "Security events and user logins", + "category": "{{env}}/OS/Linux", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], 
+ "encoding": "UTF-8", + "pathExpression": "/var/log/secure*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux Message Log", + "description": "System events, such as user creation, deletion, system start, shutdown, etc", + "category": "{{env}}/OS/Linux", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/messages*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux dmesg", + "description": "Kernel messages", + "category": "{{env}}/OS/Linux", + "automaticDateParsing": false, + "multilineProcessingEnabled": true, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/dmesg", + "blacklist": [], + "sourceType": "LocalFile" + } +{% for log in additional_logs %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [{{ log.filters }}], + "encoding": "UTF-8", + "pathExpression": "{{ log.path }}", + "blacklist": [], + "sourceType": "LocalFile" + } +{% endfor %} + ] +} \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/user.properties.j2 b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/user.properties.j2 new file mode 100644 index 000000000..3e3d93f77 --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/templates/user.properties.j2 @@ -0,0 +1,5 @@ +name = {{ collector_name }} +accessid = {{ access_id }} +accesskey = {{ access_key }} +ephemeral = true 
+syncSources = /opt/SumoCollector/config/sources.json \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/inventory b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/test.yml b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/test.yml new file mode 100644 index 000000000..9049b5d4a --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - sumologic \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/vars/main.yml b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/vars/main.yml new file mode 100644 index 000000000..c0ea57ccc --- /dev/null +++ b/ansible/roles/sumologic-icdc/Users/vdonkor/icdc_devops/ansible/roles/sumologic/vars/main.yml @@ -0,0 +1,6 @@ +--- +# vars file for sumologic +accessid: "{{ access_id }}" +accesskey: "{{ access_key }}" +config: /opt/SumoCollector/config + diff --git a/ansible/roles/sumologic-icdc/defaults/main.yml b/ansible/roles/sumologic-icdc/defaults/main.yml new file mode 100644 index 000000000..60b4b0aa0 --- /dev/null +++ b/ansible/roles/sumologic-icdc/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for sumologic +timezone: 'Etc/EST' +additional_logs: [] \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/handlers/main.yml b/ansible/roles/sumologic-icdc/handlers/main.yml new file mode 100644 index 000000000..be869c71d --- /dev/null +++ 
b/ansible/roles/sumologic-icdc/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for sumologic +- name: restart collector + service: + name: collector + state: restarted \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/meta/main.yml b/ansible/roles/sumologic-icdc/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/sumologic-icdc/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/tasks/main.yml b/ansible/roles/sumologic-icdc/tasks/main.yml new file mode 100644 index 000000000..696b08d5e --- /dev/null +++ b/ansible/roles/sumologic-icdc/tasks/main.yml @@ -0,0 +1,34 @@ +--- +# tasks file for sumologic +#- name: check if sumologic is installed +# yum: +# list: 'SumoCollector' +# register: sumo_installed + +- name: download sumologic rpm + get_url: + url: https://collectors.sumologic.com/rest/download/rpm/64 + dest: /tmp/sumologic.rpm +# when: sumo_installed.results == [] + +- name: Install SumoCollector + yum: + name: '/tmp/sumologic.rpm' + state: installed +# when: sumo_installed.results == [] + +- name: copy user.properties and source configuration + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + group: sumologic_collector + with_items: + - {src: 'sources.json.j2',dest: '{{config}}/sources.json'} + - {src: 'user.properties.j2',dest: '{{config}}/user.properties'} + notify: restart collector + +- name: Restart service + service: + name: collector + state: restarted + enabled: yes diff --git a/ansible/roles/sumologic-icdc/templates/sources.json.j2 b/ansible/roles/sumologic-icdc/templates/sources.json.j2 new file mode 100644 index 000000000..7bd33afe7 --- /dev/null +++ b/ansible/roles/sumologic-icdc/templates/sources.json.j2 @@ -0,0 +1,89 @@ +{ + "api.version": "v1", + "sources": [ + { + "name": "Linux Secure Log", + "description": "Security events and user logins", + "category": "{{env}}/OS/Linux/secure", + "automaticDateParsing": 
false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/secure*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux Message Log", + "description": "System events, such as user creation, deletion, system start, shutdown, etc", + "category": "{{env}}/OS/Linux/message", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/messages*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux dmesg", + "description": "Kernel messages", + "category": "{{env}}/OS/Linux/dmesg", + "automaticDateParsing": false, + "multilineProcessingEnabled": true, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/dmesg", + "blacklist": [], + "sourceType": "LocalFile" + } +{% for log in additional_logs %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [{{ log.filters }}], + "encoding": "UTF-8", + "pathExpression": "{{ log.path }}", + "blacklist": [], + "sourceType": "LocalFile" + } +{% endfor %} +{% for log in container_logs %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "multilineProcessingEnabled": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "uri": "unix:///var/run/docker.sock", + "sourceType": "{{ log.source }}", + "specifiedContainers": [], + "allContainers": true, + "certPath": "", + 
"collectEvents": true, + "pollInterval": "{{ log.poll }}", + "automaticDateParsing": true, + "cutoffTimestamp": 0 + + } +{% endfor %} + ] +} \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/templates/user.properties.j2 b/ansible/roles/sumologic-icdc/templates/user.properties.j2 new file mode 100644 index 000000000..6085cdff5 --- /dev/null +++ b/ansible/roles/sumologic-icdc/templates/user.properties.j2 @@ -0,0 +1,7 @@ +name = {{ collector_name }} +accessid = {{ sumo_access_id }} +accesskey = {{ sumo_access_key }} +ephemeral = true +clobber = true +syncSources = /opt/SumoCollector/config/sources.json +skipAccessKeyRemoval = true diff --git a/ansible/roles/sumologic-icdc/tests/inventory b/ansible/roles/sumologic-icdc/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/sumologic-icdc/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/sumologic-icdc/tests/test.yml b/ansible/roles/sumologic-icdc/tests/test.yml new file mode 100644 index 000000000..9049b5d4a --- /dev/null +++ b/ansible/roles/sumologic-icdc/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - sumologic \ No newline at end of file diff --git a/ansible/roles/sumologic-icdc/vars/main.yml b/ansible/roles/sumologic-icdc/vars/main.yml new file mode 100644 index 000000000..cd039b3e5 --- /dev/null +++ b/ansible/roles/sumologic-icdc/vars/main.yml @@ -0,0 +1,58 @@ +--- +# vars file for sumologic +config: /opt/SumoCollector/config +platform: cloudone +collector_name: "{{ project }}-{{ env }}-{{ inventory_hostname }}" +sumo_access_id: "{{ sumo_access_id }}" +sumo_access_key: "{{ sumo_access_key }}" +tomcat_logs: + - name: "{{ env }} Tomcat Access Logs" + description: "{{ env }} Tomcat Logs" + category: "{{ env }}/app/Tomcat/Access" + path: "{{ log_path }}/logs/localhost_access_log.*" + filters: "" + - name: "{{ env }} Tomcat Catalina Logs" + description: "{{ env }} Tomcat Catalina Logs" + 
category: "{{ env }}/app/Tomcat/Catalina" + path: "{{ log_path }}/logs/catalina.*" + filters: "" + - name: "{{ env }} Tomcat Localhost Logs" + description: "{{ env }} Tomcat Localhost Logs" + category: "{{ env }}/app/Tomcat/Localhost" + path: "{{ log_path }}/logs/localhost.*" + filters: "" + - name: "{{ env }} Tomcat Info Logs" + description: "{{ env }} Tomcat Info Logs" + category: "{{ env }}/app/Tomcat/Info" + path: "{{ log_path }}/logs/info.*" + filters: "" +nginx_logs: + - name: "{{ env }} Nginx Access Logs" + description: "{{ env }} Nginx Logs" + category: "{{ env }}/app/Nginx/Access" + path: "{{ log_path }}/nginx/access.*" + filters: "" + - name: "{{ env }} Nginx Error Logs" + description: "{{ env }} Nginx Error Logs" + category: "{{ env }}/app/Nginx/Error" + path: "{{ log_path }}/nginx/error.*" + filters: "" +docker_logs: + - name: "{{ env }} Docker Logs" + description: "{{ env }} Docker Logs" + category: "{{ env }}/Docker/Logs" + source: "DockerLog" + poll: "" + - name: "{{ env }} Docker Stats" + description: "{{ env }} Docker Stats" + category: "{{ env }}/Docker/Stats" + source: "DockerStats" + poll: "6000" +neo4j_logs: + - name: "{{ env }} Neo4j Logs" + description: "{{ env }} Neo4j Logs" + category: "{{ env }}/db/Neo4j" + path: "{{ log_path }}/*" + filters: "" +additional_logs: "{{ tomcat_logs + nginx_logs if app_type == 'app' else neo4j_logs }}" +container_logs: "{{ docker_logs if app_type == 'app' else '' }}" \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/README.md b/ansible/roles/sumologic-journalctl/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/sumologic-journalctl/defaults/main.yml b/ansible/roles/sumologic-journalctl/defaults/main.yml new file mode 100644 index 000000000..05c2db3ca --- /dev/null +++ b/ansible/roles/sumologic-journalctl/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for sumo-journalctl \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/handlers/main.yml b/ansible/roles/sumologic-journalctl/handlers/main.yml new file mode 100644 index 000000000..fa187b4da --- /dev/null +++ b/ansible/roles/sumologic-journalctl/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for sumo-journalctl \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/meta/main.yml b/ansible/roles/sumologic-journalctl/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/sumologic-journalctl/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/tasks/main.yml b/ansible/roles/sumologic-journalctl/tasks/main.yml new file mode 100644 index 000000000..211cfd723 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/tasks/main.yml @@ -0,0 +1,25 @@ +--- +# tasks file for sumo-journalctl +# +# this task updates journalctl to forward logs to rsyslog +# +# + +#- name: disable, stop journal_syslog.service +# systemd: +# state: stopped +# name: journal_syslog.service +# enabled: no + +- name: update journald configuration + lineinfile: + path: /etc/systemd/journald.conf + regexp: 'ForwardToSyslog=' + line: ForwardToSyslog=yes + +- name: restart journald service + systemd: + state: restarted + daemon_reload: yes + name: systemd-journald + enabled: yes \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/tasks/main.yml.bak b/ansible/roles/sumologic-journalctl/tasks/main.yml.bak new file mode 100644 index 000000000..2cb782553 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/tasks/main.yml.bak @@ -0,0 +1,39 @@ +--- +# tasks file for sumo-journalctl + +# - name: run docker role 
+# include_role: +# name: docker + +- name: install nc package + yum: + name: + - nc + state: installed + +- name: start sumologic docker collector + docker_container: + name: "{{env}}-neo4j-log" + image: sumologic/collector:latest-syslog + state: started + restart: yes + ports: + - "514:514/udp" + env: + SUMO_ACCESS_ID: "{{ sumo_access_id }}" + SUMO_ACCESS_KEY: "{{ sumo_access_key }}" + +# - name: start sumologic docker collector +# command: docker run --restart always -d -p 514:514/udp --name={{env}}-neo4j.log sumologic/collector:latest-syslog {{ access_id }} {{ access_key }} + +- name: configure systemd for sumo-journalctl + template: + src: journal_syslog.service.j2 + dest: /etc/systemd/system/journal_syslog.service + +- name: enable, start journal_syslog.service + systemd: + state: started + daemon_reload: yes + name: journal_syslog.service + enabled: yes \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/templates/journal_syslog.service.j2 b/ansible/roles/sumologic-journalctl/templates/journal_syslog.service.j2 new file mode 100644 index 000000000..75a588394 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/templates/journal_syslog.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Send Journalctl to Sumo + +[Service] +TimeoutStartSec=0 +ExecStart=/bin/sh -c '/usr/bin/journalctl -f | /usr/bin/ncat --udp localhost 514' + +Restart=always +RestartSec=5s + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/tests/inventory b/ansible/roles/sumologic-journalctl/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/sumologic-journalctl/tests/test.yml b/ansible/roles/sumologic-journalctl/tests/test.yml new file mode 100644 index 000000000..d7bfbcc97 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/tests/test.yml @@ -0,0 +1,5 @@ +--- +- 
hosts: localhost + remote_user: root + roles: + - sumo-journalctl \ No newline at end of file diff --git a/ansible/roles/sumologic-journalctl/vars/main.yml b/ansible/roles/sumologic-journalctl/vars/main.yml new file mode 100644 index 000000000..65355e3f9 --- /dev/null +++ b/ansible/roles/sumologic-journalctl/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for sumo-journalctl diff --git a/ansible/roles/sumologic/tasks/main.yml b/ansible/roles/sumologic/tasks/main.yml index 445d4923f..1f1485b69 100644 --- a/ansible/roles/sumologic/tasks/main.yml +++ b/ansible/roles/sumologic/tasks/main.yml @@ -1,22 +1,13 @@ --- -# tasks file for sumologic - -- name: check if sumologic is installed - yum: - list: 'SumoCollector' - register: sumo_installed - - name: download sumologic rpm get_url: url: https://collectors.sumologic.com/rest/download/rpm/64 dest: /tmp/sumologic.rpm - when: sumo_installed.results == [] - name: Install SumoCollector yum: name: '/tmp/sumologic.rpm' state: installed - when: sumo_installed.results == [] - name: copy user.properties and source configuration template: @@ -28,9 +19,8 @@ - {src: 'user.properties.j2',dest: '{{config}}/user.properties'} notify: restart collector - -- name: Start service +- name: Restart service service: name: collector - state: started + state: restarted enabled: yes diff --git a/ansible/roles/sumologic/templates/sources.json.j2 b/ansible/roles/sumologic/templates/sources.json.j2 index 4c96dd943..6f62bb29c 100644 --- a/ansible/roles/sumologic/templates/sources.json.j2 +++ b/ansible/roles/sumologic/templates/sources.json.j2 @@ -4,7 +4,7 @@ { "name": "Linux Secure Log", "description": "Security events and user logins", - "category": "{{env}}/OS/Linux", + "category": "{{env}}/OS/Linux/secure", "automaticDateParsing": false, "multilineProcessingEnabled": false, "useAutolineMatching": false, @@ -19,7 +19,7 @@ { "name": "Linux Message Log", "description": "System events, such as user creation, deletion, system start, shutdown, etc", - 
"category": "{{env}}/OS/Linux", + "category": "{{env}}/OS/Linux/message", "automaticDateParsing": false, "multilineProcessingEnabled": false, "useAutolineMatching": false, @@ -34,7 +34,7 @@ { "name": "Linux dmesg", "description": "Kernel messages", - "category": "{{env}}/OS/Linux", + "category": "{{env}}/OS/Linux/dmesg", "automaticDateParsing": false, "multilineProcessingEnabled": true, "useAutolineMatching": false, diff --git a/ansible/roles/sumologic/templates/user.properties.j2 b/ansible/roles/sumologic/templates/user.properties.j2 index 3e3d93f77..6085cdff5 100644 --- a/ansible/roles/sumologic/templates/user.properties.j2 +++ b/ansible/roles/sumologic/templates/user.properties.j2 @@ -1,5 +1,7 @@ name = {{ collector_name }} -accessid = {{ access_id }} -accesskey = {{ access_key }} +accessid = {{ sumo_access_id }} +accesskey = {{ sumo_access_key }} ephemeral = true -syncSources = /opt/SumoCollector/config/sources.json \ No newline at end of file +clobber = true +syncSources = /opt/SumoCollector/config/sources.json +skipAccessKeyRemoval = true diff --git a/ansible/roles/sumologic/vars/main.yml b/ansible/roles/sumologic/vars/main.yml index c0ea57ccc..e8ef94d10 100644 --- a/ansible/roles/sumologic/vars/main.yml +++ b/ansible/roles/sumologic/vars/main.yml @@ -1,6 +1,38 @@ --- # vars file for sumologic -accessid: "{{ access_id }}" -accesskey: "{{ access_key }}" config: /opt/SumoCollector/config - +platform: aws +collector_name: "{{ project }}-{{ platform }}-{{ env }}-{{ app_name }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +tomcat_logs: + - name: "{{ env }} {{ app_name }} Access Logs" + description: "{{ env }} {{ app_name}} Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}/Access" + path: "{{ log_path }}/localhost_access_log.*" + filters: "" + - name: "{{ env }} {{ app_name }} Catalina Logs" + description: "{{ env }} {{ app_name }} 
Catalina Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}/Catalina" + path: "{{ log_path }}/catalina.*" + filters: "" + - name: "{{ env }} {{ app_name }} Localhost Logs" + description: "{{ env }} {{ app_name }} Localhost Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}/Localhost" + path: "{{ log_path }}/localhost.*" + filters: "" +docker_logs: + - name: "{{ env }} Docker Logs" + description: "{{ env }} Docker Logs" + category: "{{ env }}/Docker/Logs" + - name: "{{ env }} Docker Stats" + description: "{{ env }} Docker Stats" + category: "{{ env }}/Docker/Stats" +neo4j_logs: + - name: "{{ env }} {{ app_name }} Logs" + description: "{{ env }} {{ app_name}} Logs" + category: "{{ env }}/{{ app_type }}/{{ app_name }}" + path: "{{ log_path }}" + filters: "" +additional_logs: "{{ tomcat_logs if app_name == 'Tomcat' else neo4j_logs }}" +additional_logs_docker: "{{ docker_logs if app_name == 'Tomcat' else '' }}" \ No newline at end of file diff --git a/ansible/roles/tomcat/README.md b/ansible/roles/tomcat/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/ansible/roles/tomcat/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/roles/tomcat/defaults/main.yml b/ansible/roles/tomcat/defaults/main.yml new file mode 100644 index 000000000..406cde47f --- /dev/null +++ b/ansible/roles/tomcat/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for k9dc diff --git a/ansible/roles/tomcat/handlers/main.yml b/ansible/roles/tomcat/handlers/main.yml new file mode 100644 index 000000000..31630a940 --- /dev/null +++ b/ansible/roles/tomcat/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for k9dc \ No newline at end of file diff --git a/ansible/roles/tomcat/meta/main.yml b/ansible/roles/tomcat/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/ansible/roles/tomcat/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. 
+ # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/tomcat/tasks/main.yml b/ansible/roles/tomcat/tasks/main.yml new file mode 100644 index 000000000..4c8e11fc0 --- /dev/null +++ b/ansible/roles/tomcat/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: open port 8080 + firewalld: + port: 8080/tcp + zone: public + immediate: yes + permanent: yes + state: enabled + +- name: create tomcat group + group: + name: tomcat + gid: 3001 + state: present + +- name: create tomcat user + user: + name: tomcat + uid: 3001 + groups: tomcat,docker + append: yes + +- name: create k9dc deployments directory + file: + path: "{{ item }}" + state: directory + owner: tomcat + group: tomcat + loop: + - "{{ deployments }}" + - "{{ docker_home }}" + - "{{ k9dc_home }}/logs" + +- name: copy docker files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: tomcat + group: tomcat + loop: + - {src: 'docker-compose.yml.j2',dest: '{{docker_home}}/docker-compose.yml'} + +- name: start k9dc + command: docker-compose up -d + args: + chdir: "{{ docker_home }}" + warn: no + tags: + - cloudone + +- name: start the k9dc + docker_compose: + project_src: "{{ docker_home }}" + state: present + tags: + - sandbox \ No newline at end of file diff --git a/ansible/roles/tomcat/templates/docker-compose.yml.j2 b/ansible/roles/tomcat/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..ada4294fa --- /dev/null +++ b/ansible/roles/tomcat/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + match: + container_name: k9dc + image: cbiitssrepo/tomcat9 + ports: + - 8080:8080 + volumes: + - {{ k9dc_home }}/logs:/usr/local/tomcat/logs + restart: always + environment: + - NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} + - NEW_RELIC_APP_NAME={{ app_name }} + - NEW_RELIC_HOST=gov-collector.newrelic.com + diff --git a/ansible/roles/tomcat/tests/inventory b/ansible/roles/tomcat/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ 
b/ansible/roles/tomcat/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/tomcat/tests/test.yml b/ansible/roles/tomcat/tests/test.yml new file mode 100644 index 000000000..bd61c2678 --- /dev/null +++ b/ansible/roles/tomcat/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - k9dc \ No newline at end of file diff --git a/ansible/roles/tomcat/vars/main.yml b/ansible/roles/tomcat/vars/main.yml new file mode 100644 index 000000000..d532df83d --- /dev/null +++ b/ansible/roles/tomcat/vars/main.yml @@ -0,0 +1,15 @@ +--- +# vars file for k9dc +docker_home: /local/content/docker +k9dc_home: /local/content/k9dc +deployments: /local/content/canine-data +container_name: k9dc +app_name: "{{env}}-icdc" +collector_name: "{{ env }}-k9dc" +newrelic_license_key: "{{ newrelic_key }}" +additional_logs: + - name: "{{ env }} k9dc Logs" + description: "{{ env }} k9dc logs" + category: "{{env }}/app/k9dc" + path: "/local/content/k9dc/logs/*.log" + filters: "" \ No newline at end of file diff --git a/ansible/roles/url-monitor/.travis.yml b/ansible/roles/url-monitor/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/ansible/roles/url-monitor/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/ansible/roles/url-monitor/README.md b/ansible/roles/url-monitor/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ 
b/ansible/roles/url-monitor/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/roles/url-monitor/defaults/main.yml b/ansible/roles/url-monitor/defaults/main.yml new file mode 100644 index 000000000..3f6e44bc4 --- /dev/null +++ b/ansible/roles/url-monitor/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for url-monitor +name: default +url: http://localhost +title: "{{item.value.tier}} Files Microservice URL Status" +text: "Current Status of {{item.value.url}} is " \ No newline at end of file diff --git a/ansible/roles/url-monitor/handlers/main.yml b/ansible/roles/url-monitor/handlers/main.yml new file mode 100644 index 000000000..c85ba3015 --- /dev/null +++ b/ansible/roles/url-monitor/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for url-monitor \ No newline at end of file diff --git a/ansible/roles/url-monitor/meta/main.yml b/ansible/roles/url-monitor/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/ansible/roles/url-monitor/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/ansible/roles/url-monitor/tasks/main.yml b/ansible/roles/url-monitor/tasks/main.yml new file mode 100644 index 000000000..a5ecd8db0 --- /dev/null +++ b/ansible/roles/url-monitor/tasks/main.yml @@ -0,0 +1,29 @@ +--- + +# tasks file for url-monitor + + +- name: "check {{app_name | default(name)}} for availability" + uri: + url: "{{item.value.url | default(url)}}" + follow_redirects: none + method: GET + status_code: [200,201,401,403,404,500,502,503] + register: result + until: ('status' in result) + retries: 100 + delay: 10 + +- name: send notification + slack: + token: "{{slack_token}}" + # icon_emoji: ":alert:" + attachments: + - text: "{{custom_text | default(text)}}{% if result.status == 200 %}*Up*{% else %}*Down*{% endif %}" + title: "{{custom_title | default(title)}}" + color: "{% if result.status == 200 %}good{% else %}danger{% endif %}" + footer: "icdc devops" + ts: "{{ansible_date_time.epoch }}" + mrkdwn_in: "{{mkdown}}" + fallback: "File Service Monitor" + when: result.status != 200 \ No newline at end of file diff --git a/ansible/roles/url-monitor/tests/inventory b/ansible/roles/url-monitor/tests/inventory new file mode 100644 index 000000000..878877b07 --- 
/dev/null +++ b/ansible/roles/url-monitor/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/roles/url-monitor/tests/test.yml b/ansible/roles/url-monitor/tests/test.yml new file mode 100644 index 000000000..c968edbe7 --- /dev/null +++ b/ansible/roles/url-monitor/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - url-monitor \ No newline at end of file diff --git a/ansible/roles/url-monitor/vars/main.yml b/ansible/roles/url-monitor/vars/main.yml new file mode 100644 index 000000000..75c505c59 --- /dev/null +++ b/ansible/roles/url-monitor/vars/main.yml @@ -0,0 +1,5 @@ +--- +# vars file for url-monitor +mkdown: + - footer + - title diff --git a/ansible/stop-bento-ctdc.yml b/ansible/stop-bento-ctdc.yml new file mode 100644 index 000000000..13f0e2507 --- /dev/null +++ b/ansible/stop-bento-ctdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: ctdc-{{tier}} + gather_facts: yes + + tasks: + - name: deploy bento-ctdc + include_role: + name: bento-ctdc + tasks_from: stop_site + \ No newline at end of file diff --git a/ansible/stop-bento-icdc.yml b/ansible/stop-bento-icdc.yml new file mode 100644 index 000000000..2f1d01bff --- /dev/null +++ b/ansible/stop-bento-icdc.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + + tasks: + - name: deploy bento-icdc + include_role: + name: bento-icdc + tasks_from: stop_site + \ No newline at end of file diff --git a/ansible/sumologic.yml b/ansible/sumologic.yml deleted file mode 100644 index 822f725e7..000000000 --- a/ansible/sumologic.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: setup sumologic agent - hosts: sumologic - connection: local - become: yes - gather_facts: no - - roles: - - sumologic \ No newline at end of file diff --git a/ansible/test-deployment.yml b/ansible/test-deployment.yml new file mode 100644 index 000000000..6b840ed81 --- /dev/null +++ b/ansible/test-deployment.yml @@ -0,0 
+1,11 @@ +--- +- name: deploy stage of bento pipeline + hosts: icdc-{{tier}} + gather_facts: yes + + tasks: + - name: deploy bento-icdc + include_role: + name: bento-icdc + tasks_from: deploy + \ No newline at end of file diff --git a/ansible/tomcat.yml b/ansible/tomcat.yml new file mode 100644 index 000000000..feec5d545 --- /dev/null +++ b/ansible/tomcat.yml @@ -0,0 +1,14 @@ +--- +- name: setup k9dc server + hosts: tomcat + become: yes + + vars_files: + - config/icdc-env-vars.yaml + + roles: + - common + - { role: docker,tags: ['sandbox'] } + - tomcat + # - sumologic + # - newrelic \ No newline at end of file diff --git a/bento/backend/Dockerfile b/bento/backend/Dockerfile new file mode 100644 index 000000000..2d8f9f75a --- /dev/null +++ b/bento/backend/Dockerfile @@ -0,0 +1,32 @@ +FROM alpine:latest +MAINTAINER ESI Devops Team + +ENV TOMCAT_MAJOR=9 +ENV TOMCAT_VERSION=9.0.56 +ENV CATALINA_HOME=/usr/local/tomcat +ENV TOMCAT_URL=https://archive.apache.org/dist/tomcat/tomcat-"$TOMCAT_MAJOR"/v"$TOMCAT_VERSION"/bin/apache-tomcat-"$TOMCAT_VERSION".tar.gz +ENV JAVA_HOME=/usr/lib/jvm/java-11-openjdk +ENV TOMCAT_USER=tomcat +ENV TOMCAT_GROUP=tomcat +ENV PATH=$CATALINA_HOME/bin:$JAVA_HOME/bin:$PATH +ENV JAVA_OPTS $JAVA_OPTS -XX:InitialRAMPercentage=25 -XX:MaxRAMPercentage=70 +ENV TZ America/New_York + +WORKDIR $CATALINA_HOME + +RUN apk upgrade --update \ + && apk --no-cache add openjdk11-jdk tzdata --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community +RUN mkdir -p "$CATALINA_HOME" \ + && addgroup -g 3001 -S ${TOMCAT_GROUP} \ + && adduser --disabled-password --ingroup "${TOMCAT_USER}" --uid 3001 ${TOMCAT_USER} \ + && echo ${TOMCAT_URL} \ + && wget -O tomcat.tar.gz $TOMCAT_URL \ + && tar -xf tomcat.tar.gz --strip-components=1 -C $CATALINA_HOME \ + && rm -rf tomcat.tar.gz \ + && chown -R ${TOMCAT_USER}:${TOMCAT_GROUP} ${CATALINA_HOME} \ + && chmod +x ${CATALINA_HOME}/bin/*.sh + +USER ${TOMCAT_USER} + +EXPOSE 8080 +CMD ["catalina.sh","run"] \ No newline at end of 
file diff --git a/bento/backend/bento-icdc-backend b/bento/backend/bento-icdc-backend new file mode 100644 index 000000000..cecb4fec8 --- /dev/null +++ b/bento/backend/bento-icdc-backend @@ -0,0 +1,33 @@ +FROM alpine:3.13.6 +MAINTAINER ESI Devops Team + +ENV TOMCAT_MAJOR=9 +ENV TOMCAT_VERSION=9.0.56 +ENV CATALINA_HOME=/usr/local/tomcat +ENV TOMCAT_URL=https://archive.apache.org/dist/tomcat/tomcat-"$TOMCAT_MAJOR"/v"$TOMCAT_VERSION"/bin/apache-tomcat-"$TOMCAT_VERSION".tar.gz +ENV JAVA_HOME=/usr/lib/jvm/java-11-openjdk +ENV TOMCAT_USER=tomcat +ENV TOMCAT_GROUP=tomcat +ENV PATH=$CATALINA_HOME/bin:$JAVA_HOME/bin:$PATH +ENV JAVA_OPTS $JAVA_OPTS -XX:InitialRAMPercentage=25 -XX:MaxRAMPercentage=70 +ENV TZ America/New_York +ENV DOCKERIZE_VERSION v0.6.1 + +WORKDIR $CATALINA_HOME + +RUN apk upgrade --update \ + && apk --no-cache add openjdk11-jdk tzdata --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community +RUN mkdir -p "$CATALINA_HOME" \ + && addgroup -g 3001 -S ${TOMCAT_GROUP} \ + && adduser --disabled-password --ingroup "${TOMCAT_USER}" --uid 3001 ${TOMCAT_USER} \ + && echo ${TOMCAT_URL} \ + && wget -O tomcat.tar.gz $TOMCAT_URL \ + && tar -xf tomcat.tar.gz --strip-components=1 -C $CATALINA_HOME \ + && rm -rf tomcat.tar.gz \ + && chown -R ${TOMCAT_USER}:${TOMCAT_GROUP} ${CATALINA_HOME} \ + && chmod +x ${CATALINA_HOME}/bin/*.sh + +USER ${TOMCAT_USER} + +EXPOSE 8080 +CMD ["catalina.sh","run"] \ No newline at end of file diff --git a/bento/backend/newrelic-infra.yml b/bento/backend/newrelic-infra.yml new file mode 100644 index 000000000..4400e53d6 --- /dev/null +++ b/bento/backend/newrelic-infra.yml @@ -0,0 +1,6 @@ +license_key: 0cfbfc0eeb64c861e7ab29794d8facbe0a2b2d7d +log_file: /var/log/newrelic-infra/newrelic-infra.log +display_name: nciws-d1092-c.nci.nih.gov +collector_url: https://gov-infra-api.newrelic.com +identity_url: https://gov-identity-api.newrelic.com +command_channel_url: https://gov-infrastructure-command-api.newrelic.com diff --git 
a/bento/docker-compose.yml b/bento/docker-compose.yml new file mode 100644 index 000000000..74e2281f0 --- /dev/null +++ b/bento/docker-compose.yml @@ -0,0 +1,42 @@ +version: '3.1' +services: + bento-backend: + container_name: backend + image: cbiitssrepo/bento-backend + ports: + - 8084:8080 + restart: always + + # logging: + # driver: sumologic + # options: + # sumo-url: "https://" + # environment: + # - NEW_RELIC_LICENSE_KEY={{ newrelic_license_key }} + # - NEW_RELIC_APP_NAME={{ app_name }} + bento-frontend: + container_name: frontend + image: cbiitssrepo/bento-frontend + ports: + - 8085:80 + restart: always + + # logging: + # driver: sumologic + # options: + # sumo-url: "https://" + + neo4: + container_name: neo4 + image: neo4j:enterprise + volumes: + - neo4j-data:/data + - neo4j-logs:/logs + ports: + - 7474:7474 + - 7687:7687 + + +volumes: + neo4j-data: + neo4j-logs: diff --git a/bento/frontend/Dockerfile b/bento/frontend/Dockerfile new file mode 100644 index 000000000..340dafe2d --- /dev/null +++ b/bento/frontend/Dockerfile @@ -0,0 +1,9 @@ +FROM nginx:alpine + +RUN apk upgrade --update \ + && apk --no-cache add tzdata --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community + +ENV TZ America/New_York + +#COPY dist /usr/share/nginx/html + diff --git a/bento/frontend/frontend_with_dockerize b/bento/frontend/frontend_with_dockerize new file mode 100644 index 000000000..47c847ed6 --- /dev/null +++ b/bento/frontend/frontend_with_dockerize @@ -0,0 +1,14 @@ +FROM nginx:alpine + +ENV DOCKERIZE_VERSION v0.6.1 + +RUN apk upgrade --update \ + && apk --no-cache add tzdata --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community \ + && wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-alpine-linux-amd64-$DOCKERIZE_VERSION.tar.gz \ + && tar -C /usr/local/bin -xzvf dockerize-alpine-linux-amd64-$DOCKERIZE_VERSION.tar.gz \ + && rm dockerize-alpine-linux-amd64-$DOCKERIZE_VERSION.tar.gz + +ENV TZ America/New_York + +#COPY 
dist /usr/share/nginx/html + diff --git a/cdk/awscdk/.env_example b/cdk/awscdk/.env_example new file mode 100644 index 000000000..0d61aab63 --- /dev/null +++ b/cdk/awscdk/.env_example @@ -0,0 +1,4 @@ +default_region=us-east-1 +default_account= +aws_access_key_id= +aws_secret_access_key= \ No newline at end of file diff --git a/cdk/awscdk/.gitignore b/cdk/awscdk/.gitignore new file mode 100644 index 000000000..f5ca49258 --- /dev/null +++ b/cdk/awscdk/.gitignore @@ -0,0 +1,12 @@ +dist/ +*.pyo +*.pyc +creds/* +imports/* +*.egg-info/ +!imports/__init__.py +.venv +.env +*/test.py +cdk.out +cdk.context.json \ No newline at end of file diff --git a/cdk/awscdk/README.md b/cdk/awscdk/README.md new file mode 100644 index 000000000..caf8198d9 --- /dev/null +++ b/cdk/awscdk/README.md @@ -0,0 +1,43 @@ +# Bento cdk project: Bento AWS + +## Prerequisites + +This project was built based on the python implementation detailed at: +- https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html + +The project can be built using the included docker-compose file to install prerequisites or they can be installed locally. + + +### Using docker-compose + +Once the repo has been cloned a dev container can be started from the cdk/awscdk folder using the following command: + +```bash +docker-compose run aws-cdk sh +``` + +This will start a container with all required applications installed and map the awscdk/bento folder as its workspace. 
+ + +## Initialize the bento cdk project + +In order to build the bento cdk files you will need to get the required python modules (this command should be run in the bento folder): + +```bash +pip3 install --ignore-installed -r requirements.txt +``` + + +## Build Cloudformation scripts for the bento cdk project + +After modules are installed you can run cdk commands on your stack: + +```bash +cdk synth -a "python3 app.py -t " +cdk bootstrap -a "python3 app.py -t " +cdk deploy -a "python3 app.py -t " +cdk diff -a "python3 app.py -t " +cdk destroy -a "python3 app.py -t " +``` + +* Note: an appropriate tier must be specified in bento.properties in order to build the bento scripts - if valid tiers are created or removed for this project getArgs.py must be updated to reflect these changes \ No newline at end of file diff --git a/cdk/awscdk/bento/README.md b/cdk/awscdk/bento/README.md new file mode 100644 index 000000000..17cb0bbc4 --- /dev/null +++ b/cdk/awscdk/bento/README.md @@ -0,0 +1,58 @@ + +# Welcome to your CDK Python project! + +This is a blank project for Python development with CDK. + +The `cdk.json` file tells the CDK Toolkit how to execute your app. + +This project is set up like a standard Python project. The initialization +process also creates a virtualenv within this project, stored under the `.venv` +directory. To create the virtualenv it assumes that there is a `python3` +(or `python` for Windows) executable in your path with access to the `venv` +package. If for any reason the automatic creation of the virtualenv fails, +you can create the virtualenv manually. + +To manually create a virtualenv on MacOS and Linux: + +``` +$ python3 -m venv .venv +``` + +After the init process completes and the virtualenv is created, you can use the following +step to activate your virtualenv. 
+ +``` +$ source .venv/bin/activate +``` + +If you are a Windows platform, you would activate the virtualenv like this: + +``` +% .venv\Scripts\activate.bat +``` + +Once the virtualenv is activated, you can install the required dependencies. + +``` +$ pip install -r requirements.txt +``` + +At this point you can now synthesize the CloudFormation template for this code. + +``` +$ cdk synth +``` + +To add additional dependencies, for example other CDK libraries, just add +them to your `setup.py` file and rerun the `pip install -r requirements.txt` +command. + +## Useful commands + + * `cdk ls` list all stacks in the app + * `cdk synth` emits the synthesized CloudFormation template + * `cdk deploy` deploy this stack to your default AWS account/region + * `cdk diff` compare deployed stack with current state + * `cdk docs` open CDK documentation + +Enjoy! diff --git a/cdk/awscdk/bento/app.py b/cdk/awscdk/bento/app.py new file mode 100644 index 000000000..77ae3b531 --- /dev/null +++ b/cdk/awscdk/bento/app.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +import os, sys + +from configparser import ConfigParser +from getArgs import getArgs +from aws_cdk import core as cdk +from aws_cdk import core + +from bento.bento_stack import BentoStack + +if __name__=="__main__": + tierName = getArgs.set_tier(sys.argv[1:]) + if not tierName: + print('Please specify the tier to build: awsApp.py -t ') + sys.exit(1) + + env = core.Environment(account=os.environ["AWS_DEFAULT_ACCOUNT"], region=os.environ["AWS_DEFAULT_REGION"]) + app = core.App() + bentoApp = BentoStack(app, tierName, env=env) + + bentoTags = dict(s.split(':') for s in bentoApp.config[tierName]['tags'].split(",")) + + for tag,value in bentoTags.items(): + core.Tags.of(bentoApp).add(tag, value) + + app.synth() \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/alb.py b/cdk/awscdk/bento/aws/alb.py new file mode 100644 index 000000000..de4444223 --- /dev/null +++ b/cdk/awscdk/bento/aws/alb.py @@ -0,0 +1,12 @@ +from 
aws_cdk import core +from aws_cdk import aws_elasticloadbalancingv2 as elbv2 + +class ALBResources: + def createResources(self, ns): + + # Create ALB + self.bentoALB = elbv2.ApplicationLoadBalancer(self, + "{}-alb".format(ns), + vpc=self.bentoVPC, + load_balancer_name="{}-alb".format(ns), + internet_facing=True) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/albListener.py b/cdk/awscdk/bento/aws/albListener.py new file mode 100644 index 000000000..8fff2e80f --- /dev/null +++ b/cdk/awscdk/bento/aws/albListener.py @@ -0,0 +1,69 @@ +import boto3 +from aws_cdk import core +from aws_cdk import aws_elasticloadbalancingv2 as elbv2 +from aws_cdk import aws_certificatemanager as cfm + +class ALBListener: + def createResources(self, ns): + + # Attach ALB to ECS Service + be_health_check = elbv2.HealthCheck(interval=core.Duration.seconds(60), + path="/ping", + timeout=core.Duration.seconds(5)) + + self.bentoALB.add_redirect( + source_protocol=elbv2.ApplicationProtocol.HTTP, + source_port=80, + target_protocol=elbv2.ApplicationProtocol.HTTPS, + target_port=443) + + # Get certificate ARN for specified domain name + client = boto3.client('acm') + response = client.list_certificates( + CertificateStatuses=[ + 'ISSUED', + ], + ) + + for cert in response["CertificateSummaryList"]: + if ('*.{}'.format(self.config[ns]['domain_name']) in cert.values()): + certARN = cert['CertificateArn'] + + bento_cert = cfm.Certificate.from_certificate_arn(self, "{}-cert".format(ns), + certificate_arn=certARN) + + listener = self.bentoALB.add_listener("PublicListener", + certificates=[ + bento_cert + ], + port=443) + + frontendtarget = listener.add_targets("ECS-frontend-Target", + port=int(self.config[ns]['frontend_container_port']), + targets=[self.frontendService], + target_group_name="{}-frontend".format(ns)) + core.Tags.of(frontendtarget).add("Name", "{}-frontend-alb-target".format(ns)) + + backendtarget = listener.add_targets("ECS-backend-Target", + 
port=int(self.config[ns]['backend_container_port']), + targets=[self.backendService], + health_check=be_health_check, + target_group_name="{}-backend".format(ns)) + core.Tags.of(backendtarget).add("Name", "{}-backend-alb-target".format(ns)) + + # Add a fixed error message when browsing an invalid URL + listener.add_action("ECS-Content-Not-Found", + action=elbv2.ListenerAction.fixed_response(200, + message_body="The requested resource is not available")) + + elbv2.ApplicationListenerRule(self, id="alb_frontend_rule", + path_pattern="/*", + priority=1, + listener=listener, + target_groups=[frontendtarget]) + + elbv2.ApplicationListenerRule(self, id="alb_backend_rule", + path_pattern="/v1/graphql/*", + priority=2, + listener=listener, + target_groups=[backendtarget]) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/ec2.py b/cdk/awscdk/bento/aws/ec2.py new file mode 100644 index 000000000..917f055d5 --- /dev/null +++ b/cdk/awscdk/bento/aws/ec2.py @@ -0,0 +1,43 @@ +from aws_cdk import core +from aws_cdk import aws_ec2 as ec2 + +class EC2Resources: + def createResources(self, ns): + + # Database EC2 Instance + # AMI + neo4j_4 = ec2.MachineImage.generic_linux({ + "us-east-1": self.config[ns]['database_ami_id'] + }) + + # User Data Script + initFile = open("aws/scripts/db_init.sh") + initScript = initFile.read() + initFile.close() + + # Instance + self.DBInstance = ec2.Instance(self, + "{}-Database-Instance".format(ns), + instance_type=ec2.InstanceType(self.config[ns]['database_instance_type']), + machine_image=neo4j_4, + key_name=self.config[ns]['ssh_key_name'], + vpc=self.bentoVPC, + role=self.ecsInstanceRole) + self.DBInstance.add_user_data(initScript) + core.Tags.of(self.DBInstance).add("Name", "{}-neo4j-4".format(ns)) + + # Update DB Security Group + dbsg = self.DBInstance.connections.security_groups[0] + + dbsg.add_ingress_rule( + self.ecssg, + ec2.Port.tcp(7474) + ) + dbsg.add_ingress_rule( + self.ecssg, + ec2.Port.tcp(7687) + ) + dbsg.add_ingress_rule( 
+ self.bastionsg, + ec2.Port.tcp(22) + ) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/ecr.py b/cdk/awscdk/bento/aws/ecr.py new file mode 100644 index 000000000..ff5838ca3 --- /dev/null +++ b/cdk/awscdk/bento/aws/ecr.py @@ -0,0 +1,13 @@ +from aws_cdk import aws_ecr as ecr + +class ECRResources: + def createResources(self, ns): + + # ECR Repository + self.bentoECR = ecr.Repository(self, + "{}-ecr".format(ns), + repository_name="{}-ecr".format(ns), + image_scan_on_push=True) + + # ECR Policy + self.bentoECR.add_to_resource_policy(self.ecrPolicyStatement) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/ecsCluster.py b/cdk/awscdk/bento/aws/ecsCluster.py new file mode 100644 index 000000000..ae2562369 --- /dev/null +++ b/cdk/awscdk/bento/aws/ecsCluster.py @@ -0,0 +1,69 @@ +from aws_cdk import aws_ecs as ecs +from aws_cdk import aws_ec2 as ec2 +from aws_cdk import aws_iam as iam +from aws_cdk import core as cdk + +class ECSCluster: + def createResources(self, ns): + + # ECS Cluster + self.bentoECS = ecs.Cluster(self, + "{}-ecs".format(ns), + cluster_name="{}".format(ns), + vpc=self.bentoVPC) + + self.bentoECS_ASG = self.bentoECS.add_capacity("{}-ecs-instance".format(ns), + instance_type=ec2.InstanceType(self.config[ns]['fronted_instance_type']), + key_name=self.config[ns]['ssh_key_name'], + auto_scaling_group_name="{}-frontend".format(ns), + task_drain_time=cdk.Duration.minutes(0), + min_capacity=int(self.config[ns]['min_size']), + max_capacity=int(self.config[ns]['max_size'])) + + ecsPolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ecs:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ecr:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ssm:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "*" + } + ] + } + + clusterPolicy = iam.Policy(self, + "{}-cluster-policy".format(ns), + policy_name="{}-ecs-policy".format(ns), + 
document=iam.PolicyDocument.from_json(ecsPolicyDocument)) + #cdk.Tags.of(ecsPolicy).add("Name", "{}-ecs-policy".format(ns) + + self.bentoECS_ASG.role.attach_inline_policy(clusterPolicy) + self.bentoECS_ASG.role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2ContainerServiceforEC2Role')) + + # User Data Script for ECS + initFile = open("aws/scripts/ecs_init.sh") + initScript = initFile.read() + initFile.close() + + initScript = initScript.replace('CLUSTER_NAME', ns) + project = ns.split("-")[0] + initScript = initScript.replace('PROJECT', project) + tier = ns.split("-")[1] + initScript = initScript.replace("ENV_NAME", tier) + + self.bentoECS_ASG.add_user_data(initScript) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/ecsService.py b/cdk/awscdk/bento/aws/ecsService.py new file mode 100644 index 000000000..1b6bf4f6b --- /dev/null +++ b/cdk/awscdk/bento/aws/ecsService.py @@ -0,0 +1,82 @@ +import boto3 +from aws_cdk import aws_ecs as ecs +from aws_cdk import aws_ec2 as ec2 + +class ECSService: + def createResources(self, ns): + + # Security Group Updates + albsg = self.bentoALB.connections.security_groups[0] + self.ecssg = self.bentoECS_ASG.connections.security_groups[0] + + botoec2 = boto3.client('ec2') + group_name = 'bento-bastion-sg' + response = botoec2.describe_security_groups( + Filters=[ + dict(Name='group-name', Values=[group_name]) + ] + ) + bastion_group_id = response['SecurityGroups'][0]['GroupId'] + self.bastionsg = ec2.SecurityGroup.from_security_group_id(self, 'bastion-security-group', + security_group_id=bastion_group_id) + + self.ecssg.add_ingress_rule( + albsg, + ec2.Port.tcp(int(self.config[ns]['backend_container_port'])) + ) + self.ecssg.add_ingress_rule( + albsg, + ec2.Port.tcp(int(self.config[ns]['frontend_container_port'])) + ) + self.ecssg.add_ingress_rule( + self.bastionsg, + ec2.Port.tcp(22) + ) + + # Backend Task Definition + backendECSTask = ecs.Ec2TaskDefinition(self, + 
"{}-ecs-backend".format(ns), + network_mode=ecs.NetworkMode.AWS_VPC) + + backendECSContainer = backendECSTask.add_container('backend', + image = ecs.ContainerImage.from_registry("cbiitssrepo/bento-backend:latest"), + memory_reservation_mib=1024, + cpu=512) + + backend_port_mapping = ecs.PortMapping( + container_port=int(self.config[ns]['backend_container_port']), + host_port=int(self.config[ns]['backend_container_port']), + protocol=ecs.Protocol.TCP + ) + + backendECSContainer.add_port_mappings(backend_port_mapping) + + # Backend Service + self.backendService = ecs.Ec2Service(self, "{}-backend".format(ns), + service_name="{}-backend".format(ns), + task_definition=backendECSTask, + cluster=self.bentoECS) + + # Frontend Task Definition + frontendECSTask = ecs.Ec2TaskDefinition(self, + "{}-ecs-frontend".format(ns), + network_mode=ecs.NetworkMode.AWS_VPC) + + frontendECSContainer = frontendECSTask.add_container('frontend', + image = ecs.ContainerImage.from_registry("cbiitssrepo/bento-frontend:latest"), + memory_reservation_mib=1024, + cpu=512) + + frontend_port_mapping = ecs.PortMapping( + container_port=int(self.config[ns]['frontend_container_port']), + host_port=int(self.config[ns]['frontend_container_port']), + protocol=ecs.Protocol.TCP + ) + + frontendECSContainer.add_port_mappings(frontend_port_mapping) + + # Frontend Service + self.frontendService = ecs.Ec2Service(self, "{}-frontend".format(ns), + service_name="{}-frontend".format(ns), + task_definition=frontendECSTask, + cluster=self.bentoECS) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/iam.py b/cdk/awscdk/bento/aws/iam.py new file mode 100644 index 000000000..80340503b --- /dev/null +++ b/cdk/awscdk/bento/aws/iam.py @@ -0,0 +1,144 @@ +from aws_cdk import core +from aws_cdk import aws_iam as iam + +class IAMResources: + def createResources(self, ns): + + # ECS Instance Role + self.ecsInstanceRole = iam.Role(self, + "{}-ecs-instance-role".format(ns), + 
role_name="{}-ecs-instance-role".format(ns), + assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")) + core.Tags.of(self.ecsInstanceRole).add("Name", "{}-ecs-instance-role".format(ns)) + + # ECS Service Role + self.ecsServiceRole = iam.Role(self, + "{}-ecs-service-role".format(ns), + role_name="{}-ecs-service-role".format(ns), + assumed_by=iam.ServicePrincipal("ecs.amazonaws.com")) + core.Tags.of(self.ecsServiceRole).add("Name", "{}-ecs-service-role".format(ns)) + + # SSM + ssmPolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "10", + "Effect": "Allow", + "Action": [ + "cloudwatch:PutMetricData", + "ds:CreateComputer", + "ds:DescribeDirectories", + "ec2:DescribeInstanceStatus", + "logs:*", + "ssm:*", + "ec2messages:*" + ], + "Resource": "*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "ssm.amazonaws.com" + } + } + }, + { + "Sid": "", + "Effect": "Allow", + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*" + } + ] + } + + ssmPolicy = iam.Policy(self, + "{}-ssm-policy".format(ns), + policy_name="{}-ssm-policy".format(ns), + document=iam.PolicyDocument.from_json(ssmPolicyDocument)) + core.Tags.of(ssmPolicy).add("Name", "{}-ssm-policy".format(ns)) + + self.ecsInstanceRole.attach_inline_policy(ssmPolicy) + + # EC2 + ec2PolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + } + ] + } + + ec2Policy = iam.Policy(self, + "{}-ec2-policy".format(ns), + policy_name="{}-ec2-policy".format(ns), + document=iam.PolicyDocument.from_json(ec2PolicyDocument)) + core.Tags.of(ec2Policy).add("Name", "{}-ec2-policy".format(ns)) + + 
self.ecsInstanceRole.attach_inline_policy(ec2Policy) + + # ECS + ecsPolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ecs:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ecr:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ssm:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "*" + } + ] + } + + ecsPolicy = iam.Policy(self, + "{}-ecs-policy".format(ns), + policy_name="{}-ecs-policy".format(ns), + document=iam.PolicyDocument.from_json(ecsPolicyDocument)) + core.Tags.of(ecsPolicy).add("Name", "{}-ecs-policy".format(ns)) + + self.ecsInstanceRole.attach_inline_policy(ecsPolicy) + self.ecsInstanceRole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2ContainerServiceforEC2Role')) + + # ECR + self.ecrPolicyStatement = iam.PolicyStatement( + sid="ElasticContainerRegistryPushAndPull", + effect=iam.Effect.ALLOW, + actions=["ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:PutImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload"], + principals=[iam.AccountRootPrincipal()]) + + # Opensearch + self.osPolicyStatement = iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=["es:*"], + principals=[iam.AnyPrincipal()], + resources=["arn:aws:es:{}:{}:domain/{}-es/*".format(core.Stack.of(self).region, core.Stack.of(self).account, ns)]) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/osCluster.py b/cdk/awscdk/bento/aws/osCluster.py new file mode 100644 index 000000000..84395ea9d --- /dev/null +++ b/cdk/awscdk/bento/aws/osCluster.py @@ -0,0 +1,25 @@ +from aws_cdk import aws_opensearchservice as os +from aws_cdk import aws_ec2 as ec2 + +class OSCluster: + def createResources(self, ns): + + vpcPrivateSubnets = self.bentoVPC.select_subnets(subnet_type=ec2.SubnetType.PRIVATE) + + # OS Cluster + self.osDomain = os.Domain(self, 
"bento-{}-elasticsearch".format(ns), + version=os.EngineVersion.ELASTICSEARCH_7_10, + domain_name="{}-es".format(ns), + vpc=self.bentoVPC, + access_policies=[self.osPolicyStatement], + capacity={ + "data_node_instance_type": "t3.medium.search", + "data_nodes": 2 + }, + ebs={ + "volume_size": 120 + }, + zone_awareness={ + "availability_zone_count": 2 + } + ) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/redisCluster.py b/cdk/awscdk/bento/aws/redisCluster.py new file mode 100644 index 000000000..9b5442d48 --- /dev/null +++ b/cdk/awscdk/bento/aws/redisCluster.py @@ -0,0 +1,30 @@ +from aws_cdk import aws_elasticache as ec +from aws_cdk import aws_ec2 as ec2 + +class RedisCluster: + def createResources(self, ns): + + vpcPrivateSubnets = self.bentoVPC.select_subnets(subnet_type=ec2.SubnetType.PRIVATE) + subnetIds = [] + for subnet in vpcPrivateSubnets.subnets: + subnetIds.append(subnet.subnet_id) + + redisSubnetGroup = ec.CfnSubnetGroup(self, "Redis-{}-ClusterPrivateSubnetGroup".format(ns), + cache_subnet_group_name="{}-private".format(ns), + subnet_ids=subnetIds, + description="{} private subnets".format(ns) + ) + + # Redis Cluster + self.ecCluster = ec.CfnReplicationGroup(self, "bento-{}-redis-cluster".format(ns), + engine="redis", + cache_node_type="cache.t3.medium", + replicas_per_node_group=1, + #num_node_groups=1, + multi_az_enabled=False, + automatic_failover_enabled=True, + auto_minor_version_upgrade=True, + replication_group_description="redis bento-{} cluster".format(ns), + cache_subnet_group_name=redisSubnetGroup.cache_subnet_group_name + ) + self.ecCluster.add_depends_on(redisSubnetGroup) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/route53.py b/cdk/awscdk/bento/aws/route53.py new file mode 100644 index 000000000..9f2be15b6 --- /dev/null +++ b/cdk/awscdk/bento/aws/route53.py @@ -0,0 +1,16 @@ +from aws_cdk import aws_route53 as route53 +from aws_cdk import aws_route53_targets as targets + +class Route53Resources: + def 
createResources(self, ns): + + # Get Hosted Zone + hostedZone = route53.HostedZone.from_lookup(self, + 'Bento-Hosted-Zone', + domain_name=self.config[ns]['domain_name']) + + route53.ARecord(self, + "{}-Alias-Record".format(ns), + record_name=ns, + target=route53.RecordTarget.from_alias(targets.LoadBalancerTarget(self.bentoALB)), + zone=hostedZone) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/scripts/db_init.sh b/cdk/awscdk/bento/aws/scripts/db_init.sh new file mode 100644 index 000000000..bfe3dcf69 --- /dev/null +++ b/cdk/awscdk/bento/aws/scripts/db_init.sh @@ -0,0 +1,15 @@ +# Install neo4j db +set -ex +cd /tmp +rm -rf icdc-devops || true +yum -y install epel-release +yum -y install wget git python-setuptools python-pip +pip install --upgrade "pip < 21.0" +pip install ansible==2.8.0 boto boto3 botocore pyOpenSSL +git clone https://github.com/CBIIT/icdc-devops +cd icdc-devops/ansible && git checkout master +mkdir -p /var/lib/neo4j/conf +ansible-playbook community-neo4j.yml +systemctl restart neo4j + +echo "Userdata script complete" >> /tmp/script_confirmation.txt \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/vpc.py b/cdk/awscdk/bento/aws/vpc.py new file mode 100644 index 000000000..c7380e049 --- /dev/null +++ b/cdk/awscdk/bento/aws/vpc.py @@ -0,0 +1,10 @@ +from aws_cdk import aws_ec2 as ec2 + +class VPCResources: + def createResources(self, ns): + + # VPC + self.bentoVPC = ec2.Vpc(self, + "{}-vpc".format(ns), + max_azs=2, + cidr=self.config[ns]['vpc_cidr_block']) \ No newline at end of file diff --git a/cdk/awscdk/bento/aws/vpcPeering.py b/cdk/awscdk/bento/aws/vpcPeering.py new file mode 100644 index 000000000..191932161 --- /dev/null +++ b/cdk/awscdk/bento/aws/vpcPeering.py @@ -0,0 +1,52 @@ +from aws_cdk import core +from aws_cdk import aws_ec2 as ec2 + +class VPCPeering: + def createResources(self, ns): + + # VPC Peering to Management VPC + mgtVPC = ec2.Vpc.from_lookup(self, + "{}-management-vpc".format(ns), + 
vpc_name='bento-management-vpc') + + vpc_peering = ec2.CfnVPCPeeringConnection(self, + "{}-vpc-peering".format(ns), + peer_vpc_id=self.bentoVPC.vpc_id, + vpc_id=mgtVPC.vpc_id) + core.Tags.of(vpc_peering).add("Name", "{}-vpc-peering".format(ns)) + + # Add peering routes from management VPC + mgtPrivateSubnets = mgtVPC.select_subnets(subnet_type=ec2.SubnetType.PRIVATE) + ec2.CfnRoute(self, + "mgt-vpc-peer-private", + route_table_id=mgtPrivateSubnets.subnets[0].route_table.route_table_id, + destination_cidr_block=self.bentoVPC.vpc_cidr_block, + vpc_peering_connection_id=vpc_peering.ref ) + + mgtPublicSubnets = mgtVPC.select_subnets(subnet_type=ec2.SubnetType.PUBLIC) + ec2.CfnRoute(self, + "mgt-vpc-peer-public", + route_table_id=mgtPublicSubnets.subnets[0].route_table.route_table_id, + destination_cidr_block=self.bentoVPC.vpc_cidr_block, + vpc_peering_connection_id=vpc_peering.ref ) + + # Add peering routes from Bento VPC + vpcPrivateSubnets = self.bentoVPC.select_subnets(subnet_type=ec2.SubnetType.PRIVATE) + subnetNum = 1 + for subnet in vpcPrivateSubnets.subnets: + ec2.CfnRoute(self, + "{}-vpc-peer-private-{}".format(ns, subnetNum), + route_table_id=subnet.route_table.route_table_id, + destination_cidr_block=mgtVPC.vpc_cidr_block, + vpc_peering_connection_id=vpc_peering.ref ) + subnetNum += 1 + + vpcPublicSubnets = self.bentoVPC.select_subnets(subnet_type=ec2.SubnetType.PUBLIC) + subnetNum = 1 + for subnet in vpcPublicSubnets.subnets: + ec2.CfnRoute(self, + "{}-vpc-peer-public-{}".format(ns, subnetNum), + route_table_id=subnet.route_table.route_table_id, + destination_cidr_block=mgtVPC.vpc_cidr_block, + vpc_peering_connection_id=vpc_peering.ref ) + subnetNum += 1 \ No newline at end of file diff --git a/cdk/awscdk/bento/bento.properties b/cdk/awscdk/bento/bento.properties new file mode 100644 index 000000000..09b95740e --- /dev/null +++ b/cdk/awscdk/bento/bento.properties @@ -0,0 +1,53 @@ +[DEFAULT] +#This is a port number for the bento-frontend 
+frontend_container_port = 80 + +#This a port number for bento-backend +backend_container_port = 8080 + +#name of the ssh key imported in the deployment instruction +ssh_key_name = devops + +#specify the aws compute instance type for the bento +fronted_instance_type = t3.medium + +#specify the aws compute instance type for the database +database_instance_type = c5.xlarge + +#id of the ami to use for the database instance +database_ami_id = ami-0affd4508a5d2481b + +#specify domain name +domain_name = bento-tools.org + +#specify the maximum and minimun number of instances in auto-scalling group +max_size = 1 +min_size = 1 + +[bento-dev] +#define any tags appropriate to your environment +ManagedBy:terraform,Project:Bento,Environment:dev,Region:us-east-1 + +#specify vpc cidr +vpc_cidr_block = 172.17.0.0/16 + +[bento-qa] +#define any tags appropriate to your environment +ManagedBy:terraform,Project:Bento,Environment:qa,Region:us-east-1 + +#specify vpc cidr +vpc_cidr_block = 172.17.0.0/16 + +[bento-cdk] +#define any tags appropriate to your environment +tags = ManagedBy:aws-cdk,Project:Bento,Environment:cdk,Region:us-east-1 + +#specify vpc cidr +vpc_cidr_block = 10.20.0.0/16 + +[bento-gitlab] +#define any tags appropriate to your environment +tags = ManagedBy:aws-cdk,Project:Bento,Environment:gitlab,Region:us-east-1,ShutdownInstance:Yes + +#specify vpc cidr +vpc_cidr_block = 10.20.0.0/16 \ No newline at end of file diff --git a/cdk/awscdk/bento/bento/__init__.py b/cdk/awscdk/bento/bento/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cdk/awscdk/bento/bento/bento_stack.py b/cdk/awscdk/bento/bento/bento_stack.py new file mode 100644 index 000000000..d56792634 --- /dev/null +++ b/cdk/awscdk/bento/bento/bento_stack.py @@ -0,0 +1,71 @@ +import os +from configparser import ConfigParser +from aws_cdk import core +from aws_cdk import core as cdk +from aws import iam, vpc, ecr, ecsCluster, ecsService, alb, albListener, ec2, route53, vpcPeering, osCluster, 
redisCluster + + +class BentoStack(cdk.Stack): + def __init__(self, scope: cdk.Construct, ns: str, **kwargs) -> None: + super().__init__(scope, ns, **kwargs) + + config = ConfigParser() + config.read('bento.properties') + self.config = config + + # VPC + bentoVPC = vpc.VPCResources.createResources(self, ns) + + # IAM + bentoIAM = iam.IAMResources.createResources(self, ns) + + # ECR + bentoECR = ecr.ECRResources.createResources(self, ns) + + # ECS Cluster + bentoECSCluster = ecsCluster.ECSCluster.createResources(self, ns) + + # ALB + bentoALB = alb.ALBResources.createResources(self, ns) + + # ECS Services + bentoECSService = ecsService.ECSService.createResources(self, ns) + + # ALB Listener + bentoALBListener = albListener.ALBListener.createResources(self, ns) + + # EC2 + bentoEC2 = ec2.EC2Resources.createResources(self, ns) + + # Route53 + bentoDNS = route53.Route53Resources.createResources(self, ns) + + # VPC Peering + bentoVPCPeering = vpcPeering.VPCPeering.createResources(self, ns) + + # Redis + #bentoRedisCluster = redisCluster.RedisCluster.createResources(self, ns) + + # Opensearch + bentoOSCluster = osCluster.OSCluster.createResources(self, ns) + + # Outputs + cdk.CfnOutput(self, "Database-IP", + value=self.DBInstance.instance_private_ip, + description="The IP address assigned to the DB Instance", + export_name="dbipaddress") + + #cdk.CfnOutput(self, "Redis Endpoint", + # value=self.ecCluster.attr_primary_end_point_address, + # description="The Redis Endpoint for this stack", + # export_name="redisendpoint") + + #cdk.CfnOutput(self, "Redis Port", + # value=self.ecCluster.attr_primary_end_point_port, + # description="The Redis Port for this stack", + # export_name="redisport") + + cdk.CfnOutput(self, "Elasticsearch Endpoint", + value=self.osDomain.domain_endpoint, + description="The Elasticsearch Endpoint for this stack", + export_name="osendpoint") \ No newline at end of file diff --git a/cdk/awscdk/bento/cdk.json b/cdk/awscdk/bento/cdk.json new file mode 
100644 index 000000000..68f471257 --- /dev/null +++ b/cdk/awscdk/bento/cdk.json @@ -0,0 +1,16 @@ +{ + "app": "python3 app.py", + "context": { + "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, + "@aws-cdk/core:enableStackNameDuplicates": "true", + "aws-cdk:enableDiffNoFail": "true", + "@aws-cdk/core:stackRelativeExports": "true", + "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true, + "@aws-cdk/aws-secretsmanager:parseOwnedSecretName": true, + "@aws-cdk/aws-kms:defaultKeyPolicies": true, + "@aws-cdk/aws-s3:grantWriteWithoutAcl": true, + "@aws-cdk/aws-ecs-patterns:removeDefaultDesiredCount": true, + "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, + "@aws-cdk/aws-efs:defaultEncryptionAtRest": true + } +} diff --git a/cdk/awscdk/bento/getArgs.py b/cdk/awscdk/bento/getArgs.py new file mode 100644 index 000000000..79b5e4d01 --- /dev/null +++ b/cdk/awscdk/bento/getArgs.py @@ -0,0 +1,21 @@ +import sys, getopt + +class getArgs: + def set_tier(argv): + validTiers = {"bento-dev", "bento-qa", "bento-cdk", "bento-gitlab"} + + try: + opts, args = getopt.getopt(argv,"ht:",["tier="]) + except getopt.GetoptError: + print('awsApp.py -t ') + sys.exit(2) + for opt, arg in opts: + if opt == '-h': + print('To use these scripts please identify the tier: awsApp.py -t ') + sys.exit(1) + elif opt in ("-t", "--tier"): + if arg in validTiers: + return arg + else: + print("Please choose a valid tier: " + ','.join(validTiers)) + sys.exit(1) \ No newline at end of file diff --git a/cdk/awscdk/bento/requirements.txt b/cdk/awscdk/bento/requirements.txt new file mode 100644 index 000000000..760b187e6 --- /dev/null +++ b/cdk/awscdk/bento/requirements.txt @@ -0,0 +1,10 @@ +-e . 
+aws-cdk.core +aws-cdk.aws_iam +aws-cdk.aws_ec2 +aws-cdk.aws_ecr +aws-cdk.aws_ecs +aws-cdk.aws_opensearchservice +aws-cdk.aws_elasticache +configparser +boto3 \ No newline at end of file diff --git a/cdk/awscdk/bento/setup.py b/cdk/awscdk/bento/setup.py new file mode 100644 index 000000000..14580741c --- /dev/null +++ b/cdk/awscdk/bento/setup.py @@ -0,0 +1,45 @@ +import setuptools + + +with open("README.md") as fp: + long_description = fp.read() + + +setuptools.setup( + name="bento", + version="0.0.1", + + description="An empty CDK Python app", + long_description=long_description, + long_description_content_type="text/markdown", + + author="author", + + package_dir={"": "bento"}, + packages=setuptools.find_packages(where="bento"), + + install_requires=[ + "aws-cdk.core==1.126.0", + ], + + python_requires=">=3.6", + + classifiers=[ + "Development Status :: 4 - Beta", + + "Intended Audience :: Developers", + + "License :: OSI Approved :: Apache Software License", + + "Programming Language :: JavaScript", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + + "Topic :: Software Development :: Code Generators", + "Topic :: Utilities", + + "Typing :: Typed", + ], +) diff --git a/cdk/awscdk/bento/source.bat b/cdk/awscdk/bento/source.bat new file mode 100644 index 000000000..9e1a83442 --- /dev/null +++ b/cdk/awscdk/bento/source.bat @@ -0,0 +1,13 @@ +@echo off + +rem The sole purpose of this script is to make the command +rem +rem source .venv/bin/activate +rem +rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. +rem On Windows, this command just runs this batch file (the argument is ignored). +rem +rem Now we don't need to document a Windows command for activating a virtualenv. 
+ +echo Executing .venv\Scripts\activate.bat for you +.venv\Scripts\activate.bat diff --git a/cdk/awscdk/docker-compose.yml b/cdk/awscdk/docker-compose.yml new file mode 100644 index 000000000..8c984c993 --- /dev/null +++ b/cdk/awscdk/docker-compose.yml @@ -0,0 +1,14 @@ +version: '3.4' +services: + aws-cdk: + container_name: aws-cdk + build: + context: . + dockerfile: ./dockerfiles/cdk_python + environment: + - AWS_ACCESS_KEY_ID=${aws_access_key_id} + - AWS_SECRET_ACCESS_KEY=${aws_secret_access_key} + - AWS_DEFAULT_REGION=${default_region} + - AWS_DEFAULT_ACCOUNT=${default_account} + volumes: + - ./bento:/bento \ No newline at end of file diff --git a/cdk/awscdk/dockerfiles/cdk_python b/cdk/awscdk/dockerfiles/cdk_python new file mode 100644 index 000000000..263a1d4fa --- /dev/null +++ b/cdk/awscdk/dockerfiles/cdk_python @@ -0,0 +1,20 @@ +FROM node:16.3.0-alpine3.13 +MAINTAINER ESI Devops Team + +# Update to the latest npm +RUN npm install npm@latest -g + +# add python and pip +RUN apk add --no-cache python3 py3-pip + +RUN python3 -m ensurepip --upgrade +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade virtualenv + +RUN python3 -V +RUN pip3 --version + +# install aws-cdk and set its log level to debug +RUN npm install -g aws-cdk@1.131.0 + +WORKDIR /bento \ No newline at end of file diff --git a/cdk/cdktf/README.md b/cdk/cdktf/README.md new file mode 100644 index 000000000..8a00fa6f5 --- /dev/null +++ b/cdk/cdktf/README.md @@ -0,0 +1,50 @@ +# Bento cdktf project: TEST + +## Prerequisites + +This project was built based on the python-pip implementation detailed at: +- https://github.com/hashicorp/terraform-cdk/blob/main/docs/getting-started/python.md + +The project can be built using the included docker-compose file to install prerequisites or they can be installed locally. 
+ +### Using docker-compose + +Once the repo has been cloned a dev container can be started from the cdktf folder using the following command: + +```bash +docker-compose run cdktf sh +``` + +This will start a container with all required applications installed and map the cdktf/bento folder as its workspace. + +## Initialize the bento cdktf project + +In order to build the bento cdktf files you will need to get the required python modules: + +```bash +pip3 install --ignore-installed -r requirements.txt +``` + +And the cdktf modules and providers: + +```bash +cdktf get +``` + +## Build Terraform scripts for the bento cdktf project + +After modules are installed you can build terraform scripts from cdktf: + +```bash +cdktf synth -a "python3 bento-aws.py -t " +``` + +## Deploy Terraform scripts for the bento cdktf project + +Resources can also be deployed directly using cdktf: + +```bash +cdktf deploy -a "python3 bento-aws.py -t " +``` + +* Note: an appropriate tier must be specified to build the bento scripts - if valid tiers are created or removed for this project getArgs.py must be updated to reflect these changes \ No newline at end of file diff --git a/cdk/cdktf/bento/.gitignore b/cdk/cdktf/bento/.gitignore new file mode 100644 index 000000000..4dc21a746 --- /dev/null +++ b/cdk/cdktf/bento/.gitignore @@ -0,0 +1,10 @@ +dist/ +*.pyo +*.pyc +creds/* +imports/* +!imports/__init__.py +.terraform +cdktf.out +test.py +terraform.tfstate* \ No newline at end of file diff --git a/cdk/cdktf/bento/aws/ec2.py b/cdk/cdktf/bento/aws/ec2.py new file mode 100644 index 000000000..e618c7c70 --- /dev/null +++ b/cdk/cdktf/bento/aws/ec2.py @@ -0,0 +1,9 @@ +import json +import imports.aws as aws + +class EC2Resources: + def createResources(self, ns, config, bentoTags, bentoIAM): + + # EC2 + # EC2 Instance + testInstance = aws.Instance(self, "bento-test", ami="ami-2757f631", instance_type=config[ns]['fronted_instance_type'], iam_instance_profile=self.ecsInstanceProfile.name, 
tags=bentoTags) \ No newline at end of file diff --git a/cdk/cdktf/bento/aws/iam.py b/cdk/cdktf/bento/aws/iam.py new file mode 100644 index 000000000..cfb002188 --- /dev/null +++ b/cdk/cdktf/bento/aws/iam.py @@ -0,0 +1,161 @@ +import json +import imports.aws as aws + +class IAMResources: + def createResources(self, ns, bentoTags): + + # IAM + # ECS Instance + ecsInstancePolicyDoc = { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ "sts:AssumeRole" ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + } + + self.ecsInstanceRole = aws.IamRole(self, "ecs-instance-role", name="{}-ecs-instance-role".format(ns), assume_role_policy=json.dumps(ecsInstancePolicyDoc), tags=bentoTags) + self.ecsInstanceRolePolicy = aws.IamRolePolicyAttachment(self, "ecs-instance-role-policy", role=self.ecsInstanceRole.name, policy_arn="arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role") + self.ecsInstanceProfile = aws.IamInstanceProfile(self, "ecs-instance-profile", name="{}-ecs-instance-profile".format(ns), path="/", role=self.ecsInstanceRole.id) + + # ECS Service + ecsServicePolicyDoc = { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ "sts:AssumeRole" ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ecs.amazonaws.com" + ] + } + } + ] + } + + self.ecsServiceRole = aws.IamRole(self, "ecs-service-role", name="{}-ecs-service-role".format(ns), assume_role_policy=json.dumps(ecsServicePolicyDoc), tags=bentoTags) + self.ecsServiceRolePolicy = aws.IamRolePolicyAttachment(self, "ecs-service-role-policy", role=self.ecsServiceRole.name, policy_arn="arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole") + + # ECR Policy + ecrPolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ElasticContainerRegistryPushAndPull", + "Effect": "Allow", + "Principal": { + "AWS": [ + "local.my_account" + ], + }, + "Action": [ + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + 
"ecr:BatchCheckLayerAvailability", + "ecr:PutImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload" + ] + } + ] + } + + self.ecrPolicy = aws.IamPolicy(self, "ecr-policy", name="{}-ecr-policy".format(ns), path="/", policy=json.dumps(ecrPolicyDocument)) + + # SSM + ssmPolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "10", + "Effect": "Allow", + "Action": [ + "cloudwatch:PutMetricData", + "ds:CreateComputer", + "ds:DescribeDirectories", + "ec2:DescribeInstanceStatus", + "logs:*", + "ssm:*", + "ec2messages:*" + ], + "Resource": "*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "ssm.amazonaws.com" + } + } + }, + { + "Sid": "", + "Effect": "Allow", + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*" + } + ] + } + + self.ssmPolicy = aws.IamPolicy(self, "ssm-policy", name="{}-ssm-policy".format(ns), path="/", policy=json.dumps(ssmPolicyDocument)) + self.ecsInstanceRoleSSMPolicy = aws.IamRolePolicyAttachment(self, "ecs-instance-role-ssm-policy", role=self.ecsInstanceRole.name, policy_arn=self.ssmPolicy.arn) + + # EC2 + ec2PolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + } + ] + } + + self.ec2Policy = aws.IamPolicy(self, "ec2-policy", name="{}-ec2-policy".format(ns), path="/", policy=json.dumps(ec2PolicyDocument)) + self.ecsInstanceRoleEC2Policy = aws.IamRolePolicyAttachment(self, "ecs-instance-role-ec2-policy", role=self.ecsInstanceRole.name, policy_arn=self.ec2Policy.arn) + + # ECS + ecsPolicyDocument = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ecs:*", + 
"Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ecr:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ssm:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "*" + } + ] + } + + self.ecsPolicy = aws.IamPolicy(self, "ecs-policy", name="{}-ecs-policy".format(ns), path="/", policy=json.dumps(ecsPolicyDocument)) + self.ecsInstanceRoleECSPolicy = aws.IamRolePolicyAttachment(self, "ecs-instance-role-ecs-policy", role=self.ecsInstanceRole.name, policy_arn=self.ecsPolicy.arn) \ No newline at end of file diff --git a/cdk/cdktf/bento/aws/vpc.py b/cdk/cdktf/bento/aws/vpc.py new file mode 100644 index 000000000..19190aa8d --- /dev/null +++ b/cdk/cdktf/bento/aws/vpc.py @@ -0,0 +1,15 @@ +import imports.aws as aws + +class VPCResources: + def createResources(self, ns, config, bentoTags): + + # VPC + bentoVPC = aws.Vpc(self, "bento-vpc", cidr_block=config[ns]['vpc_cidr_block'], tags=bentoTags) + + # Private Subnets + for subnet in config[ns]['private_subnets'].split(","): + aws.Subnet(self, '_' + subnet.replace('/', '_'), cidr_block=subnet, vpc_id=bentoVPC.id, tags=bentoTags) + + # Public Subnets + for subnet in config[ns]['public_subnets'].split(","): + aws.Subnet(self, '_' + subnet.replace('/', '_'), cidr_block=subnet, vpc_id=bentoVPC.id, tags=bentoTags) \ No newline at end of file diff --git a/cdk/cdktf/bento/bento-aws.py b/cdk/cdktf/bento/bento-aws.py new file mode 100644 index 000000000..5f365fb8f --- /dev/null +++ b/cdk/cdktf/bento/bento-aws.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +import sys, json + +from imports.aws import AwsProvider +from constructs import Construct +from cdktf import App, TerraformStack +from configparser import ConfigParser +from getArgs import getArgs +from aws import iam, ec2, vpc, ecr + +class BentoStack(TerraformStack): + def __init__(self, scope: Construct, ns: str): + super().__init__(scope, ns) + + config = ConfigParser() + config.read('bento.properties') + + bentoProvider = 
AwsProvider(self, 'Aws', region=config[ns]['region'], profile=config[ns]['profile'], shared_credentials_file="/bento/creds/credentials") + bentoTags = json.loads(config[ns]['tags']) + + # VPC + bentoVPC = vpc.VPCResources.createResources(self, ns, config, bentoTags) + + # IAM + bentoIAM = iam.IAMResources.createResources(self, ns, bentoTags) + + # ECR + bentoECR = ecr.ECRResources.createResources(self, ns, bentoTags) + + # EC2 + #bentoEC2 = ec2.EC2Resources.createResources(self, ns, config, bentoTags, bentoIAM) + + +if __name__=="__main__": + tierName = getArgs.set_tier(sys.argv[1:]) + if not tierName: + print('Please specify the tier to build: awsApp.py -t ') + sys.exit(1) + + app = App() + BentoStack(app, tierName) + + app.synth() \ No newline at end of file diff --git a/cdk/cdktf/bento/bento.properties b/cdk/cdktf/bento/bento.properties new file mode 100644 index 000000000..5348ec481 --- /dev/null +++ b/cdk/cdktf/bento/bento.properties @@ -0,0 +1,155 @@ +[DEFAULT] +#enter the region in which your aws resources will be provisioned +region = us-east-1 + +#specify your aws credential profile. Note this is not IAM role but rather profile configured during AWS CLI installation +profile = icdc + +#specify the name you will like to call this project. 
+#stack_name = bento + +#provide the name of the ecs cluster +ecs_cluster_name = bento + +#specify the number of container replicas, minimum is 1 +container_replicas = 2 + +#This is a port number for the bento-frontend +frontend_container_port = 80 + +#This a port number for bento-backend +backend_container_port = 8080 + +#provide name for the auto-scalling-groups +frontend_asg_name = frontend + +#cutomize the volume size for all the instances created except database +instance_volume_size = 40 + +#name of the ssh key imported in the deployment instruction +ssh_key_name = devops + +#specify the aws compute instance type for the bento +fronted_instance_type = t3.medium + +#provide the name of the admin user for ssh login +ssh_user = bento + +#specify the aws compute instance type for the database +database_instance_type = c5.xlarge + +#name of the database +database_name = neo4j + +#specify the volume size for the database +db_instance_volume_size = 60 + +#alb priority rule number. This can be left as default +alb_rule_priority = 100 +frontend_rule_priority = 110 +backend_rule_priority = 90 + +#specify domain name +domain_name = bento-tools.org + +#name of the application +app_name = bento + +#the port on which the frontend app listens +app_port = 80 + +#the remote state bucket name +remote_state_bucket_name = bento-terraform-remote-state + +# the redis node group +redis_node_group = 1 + +[bento-dev] +#define any tags appropriate to your environment +tags = {"ManagedBy":"terraform","Project":"Bento","Environment":"dev","Region":"us-east-1"} + +#specify vpc cidr +vpc_cidr_block = 172.17.0.0/16 + +#define private subnet to use +private_subnets = [172.17.10.0/24,172.17.11.0/24] + +#define public subnets to use. Note you must specify at least two subnets +public_subnets = [172.17.0.0/24,172.17.1.0/24] + +#specify availability zones to provision your resources. Note the availability zone must match the number of public subnets. 
Also availability zones depends on the region. +#If you change the region use the corresponding availability zones +availability_zones = [us-east-1b,us-east-1c] + +#define environment name +env = dev + +#specify private ip of the db instance +db_private_ip = 172.17.11.25 + +#desired number fo ec2 instances for ecs +desired_ec2_instance_capacity = 1 + +#specify the maximum and minimun number of instances in auto-scalling group +max_size = 1 +min_size = 1 + +[bento-qa] +#define any tags appropriate to your environment +tags = {"ManagedBy":"terraform","Project":"Bento","Environment":"qa","Region":"us-east-1"} + +#specify vpc cidr +vpc_cidr_block = 172.17.0.0/16 + +#define private subnet to use +private_subnets = 172.17.10.0/24,172.17.11.0/24 + +#define public subnets to use. Note you must specify at least two subnets +public_subnets = 172.17.0.0/24,172.17.1.0/24 + +#specify availability zones to provision your resources. Note the availability zone must match the number of public subnets. Also availability zones depends on the region. +#If you change the region use the corresponding availability zones +availability_zones = [us-east-1b,us-east-1c] + +#define environment name +env = qa + +#specify private ip of the db instance +db_private_ip = 172.18.11.25 + +#desired number fo ec2 instances for ecs +desired_ec2_instance_capacity = 1 + +#specify the maximum and minimun number of instances in auto-scalling group +max_size = 1 +min_size = 1 + +[bento-cdktf] +#define any tags appropriate to your environment +tags = {"ManagedBy":"terraform","Project":"Bento","Environment":"cdktf","Region":"us-east-1"} + +#specify vpc cidr +vpc_cidr_block = 172.17.0.0/16 + +#define private subnet to use +private_subnets = 172.17.10.0/24,172.17.11.0/24 + +#define public subnets to use. Note you must specify at least two subnets +public_subnets = 172.17.0.0/24,172.17.1.0/24 + +#specify availability zones to provision your resources. 
Note the availability zone must match the number of public subnets. Also availability zones depends on the region. +#If you change the region use the corresponding availability zones +#availability_zones = [us-east-1b,us-east-1c] + +#define environment name +env = cdktf + +#specify private ip of the db instance +db_private_ip = 172.18.11.25 + +#desired number fo ec2 instances for ecs +desired_ec2_instance_capacity = 1 + +#specify the maximum and minimun number of instances in auto-scalling group +max_size = 1 +min_size = 1 \ No newline at end of file diff --git a/cdk/cdktf/bento/cdktf.json b/cdk/cdktf/bento/cdktf.json new file mode 100644 index 000000000..64db50a24 --- /dev/null +++ b/cdk/cdktf/bento/cdktf.json @@ -0,0 +1,11 @@ +{ + "language": "python", + "app": "python3 ./main.py", + "terraformProviders": ["aws@~> 2.0"], + "terraformModules": [], + "codeMakerOutput": "imports", + "context": { + "excludeStackIdFromLogicalIds": "true", +"allowSepCharsInLogicalIds": "true" + } +} diff --git a/cdk/cdktf/bento/getArgs.py b/cdk/cdktf/bento/getArgs.py new file mode 100644 index 000000000..b900e80cc --- /dev/null +++ b/cdk/cdktf/bento/getArgs.py @@ -0,0 +1,21 @@ +import sys, getopt + +class getArgs: + def set_tier(argv): + validTiers = {"bento-dev", "bento-qa", "bento-cdktf"} + + try: + opts, args = getopt.getopt(argv,"ht:",["tier="]) + except getopt.GetoptError: + print('awsApp.py -t ') + sys.exit(2) + for opt, arg in opts: + if opt == '-h': + print('To use these scripts please identify the tier: awsApp.py -t ') + sys.exit(1) + elif opt in ("-t", "--tier"): + if arg in validTiers: + return arg + else: + print("Please choose a valid tier: " + ','.join(validTiers)) + sys.exit(1) \ No newline at end of file diff --git a/cdk/cdktf/bento/help b/cdk/cdktf/bento/help new file mode 100644 index 000000000..22bb1e701 --- /dev/null +++ b/cdk/cdktf/bento/help @@ -0,0 +1,24 @@ +======================================================================================================== + 
+ Your cdktf Python project is ready! + + cat help Prints this message + + Compile: + python3 ./main.py Compile and run the python code. + + Synthesize: + cdktf synth Synthesize Terraform resources to cdktf.out/ + + Diff: + cdktf diff Perform a diff (terraform plan) for the given stack + + Deploy: + cdktf deploy Deploy the given stack + + Destroy: + cdktf destroy Destroy the given stack + + Learn more about using modules and providers https://cdk.tf/modules-and-providers + +======================================================================================================== \ No newline at end of file diff --git a/cdk/cdktf/bento/requirements.txt b/cdk/cdktf/bento/requirements.txt new file mode 100644 index 000000000..a0fd80bcc --- /dev/null +++ b/cdk/cdktf/bento/requirements.txt @@ -0,0 +1,2 @@ +cdktf~=0.2.1 +configparser \ No newline at end of file diff --git a/cdk/cdktf/docker-compose.yml b/cdk/cdktf/docker-compose.yml new file mode 100644 index 000000000..546b5a0ee --- /dev/null +++ b/cdk/cdktf/docker-compose.yml @@ -0,0 +1,9 @@ +version: '3.4' +services: + cdktf: + container_name: terraform + build: + context: . 
+ dockerfile: ./dockerfiles/cdktf_python + volumes: + - ./bento:/bento \ No newline at end of file diff --git a/cdk/cdktf/dockerfiles/cdktf_python b/cdk/cdktf/dockerfiles/cdktf_python new file mode 100644 index 000000000..afd6b218f --- /dev/null +++ b/cdk/cdktf/dockerfiles/cdktf_python @@ -0,0 +1,29 @@ +FROM node:current-alpine3.13 +MAINTAINER ESI Devops Team + +# Update to the latest npm +RUN npm install npm@latest -g + +# add python and pip +RUN apk add --no-cache python3 py3-pip unzip +RUN pip install pipenv +RUN python3 -V +RUN pip --version + +# install terraform +# Download terraform for linux +RUN wget https://releases.hashicorp.com/terraform/0.14.9/terraform_0.14.9_linux_amd64.zip + +# Unzip +RUN unzip terraform_0.14.9_linux_amd64.zip + +# Move to local bin +RUN mv terraform /usr/local/bin/ +# Check that it's installed +RUN terraform --version + +# install cdktf-cli and set its log level to debug +RUN npm install -g cdktf-cli +ENV CDKTF_LOG_LEVEL=debug + +WORKDIR /bento \ No newline at end of file diff --git a/cdk/pulumi/.env b/cdk/pulumi/.env new file mode 100644 index 000000000..1c84c2631 --- /dev/null +++ b/cdk/pulumi/.env @@ -0,0 +1,3 @@ +default_region=us-east-1 +aws_access_key_id= +aws_secret_access_key= \ No newline at end of file diff --git a/cdk/pulumi/README.md b/cdk/pulumi/README.md new file mode 100644 index 000000000..8935625af --- /dev/null +++ b/cdk/pulumi/README.md @@ -0,0 +1,68 @@ +# Bento cdktf project: TEST + +## Prerequisites + +This project was built based on the python implementation detailed at: +- https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html + +The project can be built using the included docker-compose file to install prerequisites or they can be installed locally. 
+ +### Using docker-compose + +Once the repo has been cloned a dev container can be started from the cdktf folder using the following command: + +```bash +docker-compose run aws-cdk sh +``` + +This will start a container with all required applications installed and map the awscdk/bento folder as its workspace. + +## Initialize the bento cdk project + +In order to build the bento cdktf files you will need to get the required python modules: + +```bash +pip3 install --ignore-installed -r requirements.txt +``` + +And the cdktf modules and providers: + +```bash +cdk get +``` + +## Build Cloudformation scripts for the bento cdk project + +After modules are installed you can build terraform scripts from cdk: + +```bash +cdk synth -a "python3 bento-aws.py -t " +``` + +* Note: an appropriate tier must be specified to build the bento scripts - if valid tiers are created or removed for this project getArgs.py must be updated to reflect these changes + + + + + + + + + +python3 -m venv .venv +``` + +After the init process completes and the virtualenv is created, you can use the following +step to activate your virtualenv. + +``` +$ source .venv/bin/activate +``` + + + +Once the virtualenv is activated, you can install the required dependencies. + +``` +$ pip install -r requirements.txt +``` \ No newline at end of file diff --git a/cdk/pulumi/docker-compose.yml b/cdk/pulumi/docker-compose.yml new file mode 100644 index 000000000..4b072f76c --- /dev/null +++ b/cdk/pulumi/docker-compose.yml @@ -0,0 +1,13 @@ +version: '3.4' +services: + pulumi: + container_name: aws-pulumi + build: + context: . 
+ dockerfile: ./dockerfiles/pulumi_python_alpine + environment: + - AWS_ACCESS_KEY_ID=${aws_access_key_id} + - AWS_SECRET_ACCESS_KEY=${aws_secret_access_key} + - AWS_DEFAULT_REGION=${default_region} + volumes: + - ./bento:/bento \ No newline at end of file diff --git a/cdk/pulumi/dockerfiles/pulumi_python b/cdk/pulumi/dockerfiles/pulumi_python new file mode 100644 index 000000000..d50ad2b6d --- /dev/null +++ b/cdk/pulumi/dockerfiles/pulumi_python @@ -0,0 +1,26 @@ +FROM node:lts-buster-slim +MAINTAINER ESI Devops Team + +# Update to the latest npm +RUN npm install npm@latest -g + +# add python and pip +RUN apt-get update && apt-get install -y python3 python3-pip wget +RUN pip3 install pipenv +RUN python3 -V +RUN pip3 --version + +# install pulumi +# Download pulumi for linux +RUN wget https://get.pulumi.com/releases/sdk/pulumi-v2.25.0-linux-x64.tar.gz + +# Unzip +RUN tar -xvzf pulumi-v2.25.0-linux-x64.tar.gz +# Move to local bin +RUN chown -R root:root pulumi +RUN mv pulumi/* /usr/local/bin/ && rm -rf pulumi + +# Check that it's installed +RUN pulumi version + +WORKDIR /bento \ No newline at end of file diff --git a/cdk/pulumi/dockerfiles/pulumi_python_alpine b/cdk/pulumi/dockerfiles/pulumi_python_alpine new file mode 100644 index 000000000..934ea0115 --- /dev/null +++ b/cdk/pulumi/dockerfiles/pulumi_python_alpine @@ -0,0 +1,29 @@ +FROM node:lts-alpine3.13 + +# Passing --build-arg PULUMI_VERSION=vX.Y.Z will use that version +# of the SDK. 
Otherwise, we use whatever get.pulumi.com thinks is +# the latest +ARG PULUMI_VERSION=latest + +# Update to the latest npm +RUN npm install npm@latest -g + +# Install pulumi +ENV PATH=$PATH:/root/.pulumi/bin + +RUN echo 'nameserver 8.8.8.8' > /etc/resolv.conf + +RUN apk update && \ + apk add --no-cache curl libc6-compat python3 py3-pip && \ + if [ "$PULUMI_VERSION" = "latest" ]; then \ + curl -fsSL https://get.pulumi.com/ | sh; \ + else \ + curl -fsSL https://get.pulumi.com/ | sh -s -- --version $(echo $PULUMI_VERSION | cut -c 2-); \ + fi + +# add python and pip +RUN pip install pipenv +RUN python3 -V +RUN pip --version + +WORKDIR /bento \ No newline at end of file diff --git a/comets/ansible.cfg b/comets/ansible.cfg new file mode 100644 index 000000000..dc7559a21 --- /dev/null +++ b/comets/ansible.cfg @@ -0,0 +1,4 @@ +[defaults] +validate_certs = no +ansible_server_cert_validation = no +inventory = ./hosts \ No newline at end of file diff --git a/comets/deploy-comets-ecs.yml b/comets/deploy-comets-ecs.yml new file mode 100644 index 000000000..8ba773eab --- /dev/null +++ b/comets/deploy-comets-ecs.yml @@ -0,0 +1,17 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: build comets + include_role: + name: deploy-comets + tasks_from: build + - name: deploy comets + include_role: + name: deploy-comets + tasks_from: deploy + diff --git a/comets/deploy-comets.yml b/comets/deploy-comets.yml new file mode 100644 index 000000000..b4a7403b5 --- /dev/null +++ b/comets/deploy-comets.yml @@ -0,0 +1,16 @@ +--- +- name: setup ecs agent + hosts: comets + become: yes + gather_facts: yes + + tasks: + - name: build comets + include_role: + name: deploy-comets + tasks_from: build + # - name: deploy comets + # include_role: + # name: deploy-comets + # tasks_from: deploy + diff --git a/comets/docker.yml b/comets/docker.yml new file mode 100644 index 000000000..20587af12 --- /dev/null +++ b/comets/docker.yml @@ -0,0 
+1,12 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - docker + + \ No newline at end of file diff --git a/comets/ecs-agent.yml b/comets/ecs-agent.yml new file mode 100644 index 000000000..e1166b7a4 --- /dev/null +++ b/comets/ecs-agent.yml @@ -0,0 +1,12 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - docker + - ecs-agent + \ No newline at end of file diff --git a/comets/group_vars/all.yml b/comets/group_vars/all.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/group_vars/k9dc.yml b/comets/group_vars/k9dc.yml similarity index 100% rename from ansible/group_vars/k9dc.yml rename to comets/group_vars/k9dc.yml diff --git a/ansible/group_vars/neo4j.yml b/comets/group_vars/neo4j.yml similarity index 100% rename from ansible/group_vars/neo4j.yml rename to comets/group_vars/neo4j.yml diff --git a/comets/hosts b/comets/hosts new file mode 100644 index 000000000..e12fc0cdc --- /dev/null +++ b/comets/hosts @@ -0,0 +1,4 @@ +[all] +127.0.0.1 + +[comets] diff --git a/comets/roles/auth0/.travis.yml b/comets/roles/auth0/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/comets/roles/auth0/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/comets/roles/auth0/README.md b/comets/roles/auth0/README.md new file mode 
100644 index 000000000..225dd44b9 --- /dev/null +++ b/comets/roles/auth0/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/comets/roles/auth0/defaults/main.yml b/comets/roles/auth0/defaults/main.yml new file mode 100644 index 000000000..5ce6d2b75 --- /dev/null +++ b/comets/roles/auth0/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for auth0 \ No newline at end of file diff --git a/comets/roles/auth0/handlers/main.yml b/comets/roles/auth0/handlers/main.yml new file mode 100644 index 000000000..49e2690ae --- /dev/null +++ b/comets/roles/auth0/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for auth0 + +- name: restart_firewalld + service: + name: firewalld + state: restarted + +- name: restart_apache + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/comets/roles/auth0/meta/main.yml b/comets/roles/auth0/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/comets/roles/auth0/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/comets/roles/auth0/tasks/main.yml b/comets/roles/auth0/tasks/main.yml new file mode 100644 index 000000000..52175c179 --- /dev/null +++ b/comets/roles/auth0/tasks/main.yml @@ -0,0 +1,116 @@ +--- +# tasks file for auth0 +- name: install packages needed for auth0 + yum: + state: installed + name: + - httpd + - mod_ssl + - mod_auth_openid + - firewalld + +- name: enable and start httpd + service: + name: httpd + state: started + enabled: yes + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + +- name: create directory to store self-signed certificate and logs + file: + state: directory + path: "{{item}}" + loop: + - /etc/httpd/ssl + - /etc/httpd/logs/comets + +- name: set fact about the qa environment + set_fact: + tier: -test + when: env == "qa" + +- name: set fact about the dev environment + set_fact: + tier: -dev + when: env == "dev" + +- name: set fact about the stage environment + set_fact: + tier: -stage + when: env == "stage" + +- name: set fact about the prod environment + set_fact: + tier: "" + when: env == "prod" + +- name: generate self-signed certificate + shell: > + openssl req -new -x509 -sha256 -days 36500 + 
-nodes -out /etc/httpd/ssl/httpd.pem + -keyout /etc/httpd/ssl/httpd.key + -subj "/C=US/ST=Maryland/L=Rockville/O=NCI/OU=ESI/CN=comets-analytics{{tier}}.org" + +- name: install openODIC module jose + yum: + name: "{{item}}" + state: present + loop: + - https://github.com/zmartzone/mod_auth_openidc/releases/download/v2.4.0/cjose-0.6.1.5-1.el7.x86_64.rpm + - https://github.com/zmartzone/mod_auth_openidc/releases/download/v2.4.3/mod_auth_openidc-2.4.3-1.el7.x86_64.rpm + +- name: add comets.conf + template: + src: comets.conf.j2 + dest: /etc/httpd/conf.d/comets.conf + notify: restart_apache + +- name: open https port + firewalld: + zone: dmz + service: https + permanent: yes + state: enabled + notify: restart_firewalld + +- name: open app port + firewalld: + zone: dmz + port: "{{item}}/tcp" + permanent: yes + state: enabled + loop: + - 8000 + - 61613 + notify: restart_firewalld + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/comets/roles/auth0/templates/comets.conf.j2 b/comets/roles/auth0/templates/comets.conf.j2 new file mode 100644 index 000000000..f3fdbc2bb --- /dev/null +++ b/comets/roles/auth0/templates/comets.conf.j2 @@ -0,0 +1,107 @@ + + + +ServerName comets-analytics{{tier}}.org +ServerAlias www.comets-analytics{{tier}}.org + + + SSLEngine On + SSLCertificateFile /etc/httpd/ssl/httpd.pem + SSLCertificateKeyFile /etc/httpd/ssl/httpd.key + + + CustomLog /etc/httpd/logs/comets/access.log combined env=!dontlog + ErrorLog /etc/httpd/logs/comets/error.log + + + RewriteEngine On + RewriteCond %{HTTP_HOST} !^www.comets-analytics{{tier}}.org$ + RewriteRule (.*) https://www.comets-analytics{{tier}}.org$1 [R=301,L] + + + SetHandler server-info + ProxyPass ! 
+ + +RewriteEngine On +ProxyRequests Off + +ProxyPass / http://127.0.0.1:8000/ timeout=1800 +ProxyPassReverse / http://127.0.0.1:8000/ + +OIDCProviderIssuer {{open_id_issuer}} +OIDCProviderAuthorizationEndpoint {{open_id_issuer}}/authorize +OIDCProviderTokenEndpoint {{open_id_issuer}}/oauth/token +OIDCProviderTokenEndpointAuth client_secret_post +OIDCProviderUserInfoEndpoint {{open_id_issuer}}/userinfo +OIDCClientID {{open_client_id}} +OIDCClientSecret {{open_client_secret}} +OIDCDefaultURL https://comets-analytics{{tier}}.org/public/timeout.html +OIDCProviderJwksUri {{open_id_issuer}}/.well-known/jwks.json +OIDCSessionMaxDuration 0 +OIDCScope "openid email family_name given_name app_metadata user_metadata user_id comets" +OIDCRedirectURI https://comets-analytics{{tier}}.org/auth0_redirect +OIDCCryptoPassphrase "{{open_passphrase}}" +OIDCCookiePath / + +OIDCProviderMetadataURL {{open_id_issuer}}/.well-known/openid-configuration +OIDCPassIDTokenAs payload +OIDCUserInfoRefreshInterval 5 +OIDCSessionInactivityTimeout 1800 + + + AuthType openid-connect + + Require claim comets:active + Require claim comets:admin + + Header echo ^OIDC_id_token_payload$ + Header set Cache-Control "no-cache, no-store, must-revalidate" + Header set Pragma "no-cache" + Header set Expires 0 + ErrorDocument 401 /errordocs/unauthorized.html + LogLevel debug + + + + + Require claim comets:admin + + ErrorDocument 401 /errordocs/index.html + + + + + Require claim "comets~^\w+$" + + Require claim comets:active + Require claim comets:admin + + + ErrorDocument 401 /errordocs/registration.html + + + + + Require valid-user + + Require claim "comets~^\w+$" + + + ErrorDocument 401 /errordocs/index.html + + + + Require all denied + + + + AuthType none + Require all granted + + + + ErrorDocument 401 ! 
+ + + diff --git a/comets/roles/auth0/tests/inventory b/comets/roles/auth0/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/comets/roles/auth0/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/comets/roles/auth0/tests/test.yml b/comets/roles/auth0/tests/test.yml new file mode 100644 index 000000000..4b53ea133 --- /dev/null +++ b/comets/roles/auth0/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - auth0 \ No newline at end of file diff --git a/comets/roles/auth0/vars/main.yml b/comets/roles/auth0/vars/main.yml new file mode 100644 index 000000000..fd2ce32f7 --- /dev/null +++ b/comets/roles/auth0/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for auth0 \ No newline at end of file diff --git a/comets/roles/common/README.md b/comets/roles/common/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/comets/roles/common/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/comets/roles/common/defaults/main.yml b/comets/roles/common/defaults/main.yml new file mode 100644 index 000000000..fa3055099 --- /dev/null +++ b/comets/roles/common/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for common \ No newline at end of file diff --git a/comets/roles/common/handlers/main.yml b/comets/roles/common/handlers/main.yml new file mode 100644 index 000000000..c6a8f0c7b --- /dev/null +++ b/comets/roles/common/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for common \ No newline at end of file diff --git a/comets/roles/common/meta/main.yml b/comets/roles/common/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/comets/roles/common/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. 
If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/comets/roles/common/tasks/main.yml b/comets/roles/common/tasks/main.yml new file mode 100644 index 000000000..a97f88f47 --- /dev/null +++ b/comets/roles/common/tasks/main.yml @@ -0,0 +1,10 @@ +--- +# tasks file for common +- name: Set timezone to America/New_York + timezone: + name: America/New_York + +# - name: set hostname +# hostname: +# name: "{{ hostvars[inventory_hostname].group_names[0] }}-{{ env }}" + \ No newline at end of file diff --git a/comets/roles/common/tests/inventory b/comets/roles/common/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/comets/roles/common/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/comets/roles/common/tests/test.yml b/comets/roles/common/tests/test.yml new file mode 100644 index 000000000..8d24282da --- /dev/null +++ b/comets/roles/common/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - common \ No newline at end of file diff --git a/comets/roles/common/vars/main.yml b/comets/roles/common/vars/main.yml new file mode 100644 index 000000000..feaa92f9b --- /dev/null +++ b/comets/roles/common/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for common \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/README.md b/comets/roles/deploy-comets-ecs/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/comets/roles/deploy-comets-ecs/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/comets/roles/deploy-comets-ecs/defaults/main.yml b/comets/roles/deploy-comets-ecs/defaults/main.yml new file mode 100644 index 000000000..cc3ff9ffa --- /dev/null +++ b/comets/roles/deploy-comets-ecs/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for deploy-comets \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/handlers/main.yml b/comets/roles/deploy-comets-ecs/handlers/main.yml new file mode 100644 index 000000000..b30592c1a --- /dev/null +++ b/comets/roles/deploy-comets-ecs/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for deploy-comets diff --git a/comets/roles/deploy-comets-ecs/meta/main.yml b/comets/roles/deploy-comets-ecs/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/comets/roles/deploy-comets-ecs/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/tasks/build.yml b/comets/roles/deploy-comets-ecs/tasks/build.yml new file mode 100644 index 000000000..b44328d4a --- /dev/null +++ b/comets/roles/deploy-comets-ecs/tasks/build.yml @@ -0,0 +1,103 @@ +--- + +- name: gather comets-app facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-frontend + "instance-state-name": running + register: frontend + +- name: set instance ip + set_fact: + queue_host: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +- name: Check if comets and exits + stat: + path: /tmp/comets + register: comets_result + +- name: remove old comets repo + file: + path: /tmp/comets + state: absent + when: comets_result.stat.exists + +- name: Check if R-cometsAnalytics exits + stat: + path: /tmp/R-cometsAnalytics + register: r_result + +- name: remove old r repo + file: + path: /tmp/R-cometsAnalytics + state: absent + when: r_result.stat.exists + +- name: checkout comets repo + git: + repo: "{{comets_repo}}" + dest: /tmp/comets + version: master + force: yes + +- name: checkout R repo + git: + repo: "{{R_repo}}" + dest: /tmp/R-cometsAnalytics + 
version: comets_1.6.0_20191205 + force: yes + +- name: copy R code + synchronize: + dest: /tmp/comets/comets/restricted/rcode/ + src: /tmp/R-cometsAnalytics/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + +- name: update settings + template: + dest: /tmp/comets/comets/restricted/settings.yml + src: settings.yml.j2 + +- name: add httpd.conf Dockerfile + template: src={{item.src}} dest={{item.dest}} + with_items: + - { src: 'Dockerfile.j2', dest: '/tmp/comets/Dockerfile' } + - { src: 'httpd.conf.j2', dest: '/tmp/comets/comets-httpd.conf' } + +- name: login into ecr + shell: "$(/bin/aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {{ecr}})" + ignore_errors: True + register: ecr_login + + +# - name: build apache image +# docker_image: +# path: "/tmp/comets" +# pull: yes +# name: "{{ecr}}/comets" +# tag: "apache-{{version}}" +# push: yes + +- name: build {{ecr}}/comets:app image + docker_image: + path: "/tmp/comets" + dockerfile: "/tmp/comets/docker/comets.app.dockerfile" + pull: yes + name: "{{ecr}}/comets" + tag: "app-{{version}}" + push: yes + + +- name: build {{ecr}}/comets:processor image + docker_image: + path: "/tmp/comets" + dockerfile: "/tmp/comets/docker/comets.app.processor.dockerfile" + pull: yes + name: "{{ecr}}/comets" + tag: "processor-{{version}}" + push: yes diff --git a/comets/roles/deploy-comets-ecs/tasks/deploy.yml b/comets/roles/deploy-comets-ecs/tasks/deploy.yml new file mode 100644 index 000000000..a1902ac5c --- /dev/null +++ b/comets/roles/deploy-comets-ecs/tasks/deploy.yml @@ -0,0 +1,196 @@ +--- +- name: create task comets definition + ecs_taskdefinition: + network_mode: bridge + family: comets-{{env}}-comets + memory: '1024' + cpu: '1024' + state: present + region: "{{region}}" + containers: + - name: comets + essential: true + image: "{{ecr}}/comets:app-{{version}}" + portMappings: + - containerPort: "8000" + hostPort: "8000" + protocol: tcp + links: + - activemq + dependsOn: + 
- containerName: activemq + condition: START + logConfiguration: + logDriver: awslogs + options: + awslogs-group: "comets-app-logs" + awslogs-region: "us-east-1" + awslogs-stream-prefix: "comets-{{env}}" + - name: activemq + image: cbiitss/activemq:latest + essential: true + portMappings: + - protocol: tcp + containerPort: 61613 + hostPort: 61613 + - protocol: tcp + containerPort: 8161 + hostPort: 8161 + + register: task_output + +- name: create processor task definition + ecs_taskdefinition: + network_mode: bridge + family: comets-{{env}}-processor + state: present + memory: '2048' + cpu: '1024' + region: "{{region}}" + containers: + - name: processor + essential: true + image: "{{ecr}}/comets:processor-{{version}}" + logConfiguration: + logDriver: awslogs + options: + awslogs-group: "comets-processor-logs" + awslogs-region: "us-east-1" + awslogs-stream-prefix: "comets-{{env}}" + register: task_output + +# - name: create apach task definition +# ecs_taskdefinition: +# network_mode: bridge +# family: comets-{{env}}-apache +# state: present +# memory: '512' +# cpu: '512' +# region: "{{region}}" +# containers: +# - name: apache +# essential: true +# image: "{{ecr}}/comets:apache-{{version}}" +# portMappings: +# - protocol: tcp +# containerPort: 80 +# hostPort: 80 +# logConfiguration: +# logDriver: awslogs +# options: +# awslogs-group: "comets-apache-logs" +# awslogs-region: "us-east-1" +# awslogs-stream-prefix: "comets-{{env}}" +# register: task_output + + + +# - name: query task definition apache +# ecs_taskdefinition_facts: +# task_definition: comets-{{env}}-apache +# region: "{{region}}" +# register: task_apache + +- name: query task definition comets + ecs_taskdefinition_facts: + task_definition: comets-{{env}}-comets + region: "{{region}}" + register: task_comets + +- name: query task definition processor + ecs_taskdefinition_facts: + task_definition: comets-{{env}}-processor + region: "{{region}}" + register: task_processor + +# - name: query ecs service apache 
+# ecs_service_facts: +# cluster: comets-{{env}} +# service: comets-{{env}}-apache +# details: true +# region: "{{region}}" +# register: service_apache + +- name: query ecs service comets + ecs_service_facts: + cluster: comets-{{env}} + service: comets-{{env}}-comets + details: true + region: "{{region}}" + register: service_comets + +- name: query ecs service processor + ecs_service_facts: + cluster: comets-{{env}} + service: comets-{{env}}-processor + details: true + region: "{{region}}" + register: service_processor + + +- name: set facts + set_fact: + comets_revision: "{{task_comets.revision}}" + # apache_revision: "{{task_apache.revision}}" + processor_revision: "{{task_processor.revision}}" + task_processor_name: "{{task_processor.family}}" + # task_apache_name: "{{task_apache.family}}" + task_comets_name: "{{task_comets.family}}" + lb_frontend: "{{service_comets.services[0].loadBalancers}}" + role_arn: "{{service_comets.services[0].roleArn}}" + + +# - debug: +# msg: "{{service_comets}}" +# - debug: +# msg: "{{service_processor}}" + +# - name: update apache service +# ecs_service: +# state: present +# name: comets-{{env}}-apache +# cluster: comets-{{env}} +# task_definition: "{{task_apache_name}}:{{apache_revision}}" +# role: "{{role_arn}}" +# # force_new_deployment: yes +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# load_balancers: "{{ lb_frontend }}" +# region: "{{region}}" +# register: service_apache_output + + +- name: update comets service + ecs_service: + state: present + name: comets-{{env}}-comets + cluster: comets-{{env}} + task_definition: "{{task_comets_name}}:{{comets_revision}}" + role: "{{role_arn}}" + # force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_comets_output + + +- name: update processor service + ecs_service: + state: 
present + name: comets-{{env}}-processor + cluster: comets-{{env}} + task_definition: "{{task_processor_name}}:{{processor_revision}}" + # role: "{{role_arn}}" + # force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_processor_output + diff --git a/comets/roles/deploy-comets-ecs/tasks/main.yml b/comets/roles/deploy-comets-ecs/tasks/main.yml new file mode 100644 index 000000000..ff048c165 --- /dev/null +++ b/comets/roles/deploy-comets-ecs/tasks/main.yml @@ -0,0 +1,89 @@ +--- +# tasks file for deploy-comets + +- name: gather comets-app facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-frontend + "instance-state-name": running + register: frontend + +- name: set instance ip + set_fact: + queue_host: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +- name: Check if comets and exits + stat: + path: /tmp/comets + register: comets_result + +- name: remove old comets repo + file: + path: /tmp/comets + state: absent + when: comets_result.stat.exists + +- name: Check if R-cometsAnalytics exits + stat: + path: /tmp/R-cometsAnalytics + register: r_result + +- name: remove old r repo + file: + path: /tmp/R-cometsAnalytics + state: absent + when: r_result.stat.exists + +#create app and logs directory +- name: create app and logs directory + file: + path: "{{app_base_directory}}/{{item}}" + state: directory + loop: + - app + - logs + +- name: checkout comets repo + git: + repo: "{{comets_repo}}" + dest: /tmp/comets + version: master + force: yes + +- name: checkout R repo + git: + repo: "{{R_repo}}" + dest: /tmp/R-cometsAnalytics + version: comets_1.6.0_20191205 + force: yes + +- name: copy comets app + synchronize: + dest: /local/content/docker/comets/app/ + src: /tmp/comets/comets/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + - "--exclude=settings.yml" + +- 
name: copy R code + synchronize: + dest: /local/content/docker/comets/app/restricted/rcode/ + src: /tmp/R-cometsAnalytics/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + +- name: update settings + template: + dest: /local/content/docker/comets/app/restricted/settings.yml + src: settings.yml.j2 + +- name: restart docker + service: + name: docker + state: restarted \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/templates/Dockerfile.j2 b/comets/roles/deploy-comets-ecs/templates/Dockerfile.j2 new file mode 100644 index 000000000..ca37f173b --- /dev/null +++ b/comets/roles/deploy-comets-ecs/templates/Dockerfile.j2 @@ -0,0 +1,9 @@ +FROM httpd:2.4.43 + +RUN apt-get update \ + && apt-get install -y apt-utils libapache2-mod-auth-openidc apache2-bin \ + && ln -s /usr/lib/apache2/modules/mod_auth_openidc.so /usr/local/apache2/modules/mod_auth_openidc.so + +COPY ./comets-httpd.conf /usr/local/apache2/conf/httpd.conf + + \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/templates/httpd.conf.j2 b/comets/roles/deploy-comets-ecs/templates/httpd.conf.j2 new file mode 100644 index 000000000..26644a739 --- /dev/null +++ b/comets/roles/deploy-comets-ecs/templates/httpd.conf.j2 @@ -0,0 +1,648 @@ +# +# This is the main Apache HTTP server configuration file. It contains the +# configuration directives that give the server its instructions. +# See for detailed information. +# In particular, see +# +# for a discussion of each configuration directive. +# +# Do NOT simply read the instructions in here without understanding +# what they do. They're here only as hints or reminders. If you are unsure +# consult the online docs. You have been warned. +# +# Configuration and logfile names: If the filenames you specify for many +# of the server's control files begin with "/" (or "drive:/" for Win32), the +# server will use that explicit path. 
If the filenames do *not* begin +# with "/", the value of ServerRoot is prepended -- so "logs/access_log" +# with ServerRoot set to "/usr/local/apache2" will be interpreted by the +# server as "/usr/local/apache2/logs/access_log", whereas "/logs/access_log" +# will be interpreted as '/logs/access_log'. + +# +# ServerRoot: The top of the directory tree under which the server's +# configuration, error, and log files are kept. +# +# Do not add a slash at the end of the directory path. If you point +# ServerRoot at a non-local disk, be sure to specify a local disk on the +# Mutex directive, if file-based mutexes are used. If you wish to share the +# same ServerRoot for multiple httpd daemons, you will need to change at +# least PidFile. +# +ServerRoot "/usr/local/apache2" + +# +# Mutex: Allows you to set the mutex mechanism and mutex file directory +# for individual mutexes, or change the global defaults +# +# Uncomment and change the directory if mutexes are file-based and the default +# mutex file directory is not on a local disk or is not appropriate for some +# other reason. +# +# Mutex default:logs + +# +# Listen: Allows you to bind Apache to specific IP addresses and/or +# ports, instead of the default. See also the +# directive. +# +# Change this to Listen on specific IP addresses as shown below to +# prevent Apache from glomming onto all bound IP addresses. +# +#Listen 12.34.56.78:80 +Listen 80 + +# +# Dynamic Shared Object (DSO) Support +# +# To be able to use the functionality of a module which was built as a DSO you +# have to place corresponding `LoadModule' lines at this location so the +# directives contained in it are actually available _before_ they are used. +# Statically compiled modules (those listed by `httpd -l') do not need +# to be loaded here. 
+# +# Example: +# LoadModule foo_module modules/mod_foo.so +# +LoadModule mpm_event_module modules/mod_mpm_event.so +LoadModule mpm_prefork_module modules/mod_mpm_prefork.so +LoadModule mpm_worker_module modules/mod_mpm_worker.so +LoadModule authn_file_module modules/mod_authn_file.so +#LoadModule authn_dbm_module modules/mod_authn_dbm.so +#LoadModule authn_anon_module modules/mod_authn_anon.so +#LoadModule authn_dbd_module modules/mod_authn_dbd.so +#LoadModule authn_socache_module modules/mod_authn_socache.so +LoadModule authn_core_module modules/mod_authn_core.so +LoadModule authz_host_module modules/mod_authz_host.so +LoadModule authz_groupfile_module modules/mod_authz_groupfile.so +LoadModule authz_user_module modules/mod_authz_user.so +#LoadModule authz_dbm_module modules/mod_authz_dbm.so +#LoadModule authz_owner_module modules/mod_authz_owner.so +#LoadModule authz_dbd_module modules/mod_authz_dbd.so +LoadModule authz_core_module modules/mod_authz_core.so +#LoadModule authnz_ldap_module modules/mod_authnz_ldap.so +#LoadModule authnz_fcgi_module modules/mod_authnz_fcgi.so +LoadModule access_compat_module modules/mod_access_compat.so +LoadModule auth_basic_module modules/mod_auth_basic.so +#LoadModule auth_form_module modules/mod_auth_form.so +#LoadModule auth_digest_module modules/mod_auth_digest.so +#LoadModule allowmethods_module modules/mod_allowmethods.so +#LoadModule isapi_module modules/mod_isapi.so +#LoadModule file_cache_module modules/mod_file_cache.so +#LoadModule cache_module modules/mod_cache.so +#LoadModule cache_disk_module modules/mod_cache_disk.so +#LoadModule cache_socache_module modules/mod_cache_socache.so +#LoadModule socache_shmcb_module modules/mod_socache_shmcb.so +#LoadModule socache_dbm_module modules/mod_socache_dbm.so +#LoadModule socache_memcache_module modules/mod_socache_memcache.so +#LoadModule socache_redis_module modules/mod_socache_redis.so +#LoadModule watchdog_module modules/mod_watchdog.so +#LoadModule macro_module 
modules/mod_macro.so +#LoadModule dbd_module modules/mod_dbd.so +#LoadModule bucketeer_module modules/mod_bucketeer.so +#LoadModule dumpio_module modules/mod_dumpio.so +#LoadModule echo_module modules/mod_echo.so +#LoadModule example_hooks_module modules/mod_example_hooks.so +#LoadModule case_filter_module modules/mod_case_filter.so +#LoadModule case_filter_in_module modules/mod_case_filter_in.so +#LoadModule example_ipc_module modules/mod_example_ipc.so +#LoadModule buffer_module modules/mod_buffer.so +#LoadModule data_module modules/mod_data.so +#LoadModule ratelimit_module modules/mod_ratelimit.so +LoadModule reqtimeout_module modules/mod_reqtimeout.so +#LoadModule ext_filter_module modules/mod_ext_filter.so +#LoadModule request_module modules/mod_request.so +#LoadModule include_module modules/mod_include.so +LoadModule filter_module modules/mod_filter.so +#LoadModule reflector_module modules/mod_reflector.so +#LoadModule substitute_module modules/mod_substitute.so +#LoadModule sed_module modules/mod_sed.so +#LoadModule charset_lite_module modules/mod_charset_lite.so +#LoadModule deflate_module modules/mod_deflate.so +#LoadModule xml2enc_module modules/mod_xml2enc.so +#LoadModule proxy_html_module modules/mod_proxy_html.so +#LoadModule brotli_module modules/mod_brotli.so +LoadModule mime_module modules/mod_mime.so +#LoadModule ldap_module modules/mod_ldap.so +LoadModule log_config_module modules/mod_log_config.so +#LoadModule log_debug_module modules/mod_log_debug.so +#LoadModule log_forensic_module modules/mod_log_forensic.so +#LoadModule logio_module modules/mod_logio.so +#LoadModule lua_module modules/mod_lua.so +LoadModule env_module modules/mod_env.so +#LoadModule mime_magic_module modules/mod_mime_magic.so +#LoadModule cern_meta_module modules/mod_cern_meta.so +#LoadModule expires_module modules/mod_expires.so +LoadModule headers_module modules/mod_headers.so +#LoadModule ident_module modules/mod_ident.so +#LoadModule usertrack_module 
modules/mod_usertrack.so +#LoadModule unique_id_module modules/mod_unique_id.so +LoadModule setenvif_module modules/mod_setenvif.so +LoadModule version_module modules/mod_version.so +LoadModule remoteip_module modules/mod_remoteip.so +LoadModule proxy_module modules/mod_proxy.so +LoadModule proxy_connect_module modules/mod_proxy_connect.so +LoadModule proxy_ftp_module modules/mod_proxy_ftp.so +LoadModule proxy_http_module modules/mod_proxy_http.so +LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so +#LoadModule proxy_scgi_module modules/mod_proxy_scgi.so +LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so +#LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so +#LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so +LoadModule proxy_ajp_module modules/mod_proxy_ajp.so +LoadModule proxy_balancer_module modules/mod_proxy_balancer.so +LoadModule proxy_express_module modules/mod_proxy_express.so +LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so +#LoadModule session_module modules/mod_session.so +#LoadModule session_cookie_module modules/mod_session_cookie.so +#LoadModule session_crypto_module modules/mod_session_crypto.so +#LoadModule session_dbd_module modules/mod_session_dbd.so +#LoadModule slotmem_shm_module modules/mod_slotmem_shm.so +#LoadModule slotmem_plain_module modules/mod_slotmem_plain.so +#LoadModule ssl_module modules/mod_ssl.so +#LoadModule optional_hook_export_module modules/mod_optional_hook_export.so +#LoadModule optional_hook_import_module modules/mod_optional_hook_import.so +#LoadModule optional_fn_import_module modules/mod_optional_fn_import.so +#LoadModule optional_fn_export_module modules/mod_optional_fn_export.so +#LoadModule dialup_module modules/mod_dialup.so +LoadModule http2_module modules/mod_http2.so +#LoadModule proxy_http2_module modules/mod_proxy_http2.so +#LoadModule md_module modules/mod_md.so +#LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so +#LoadModule 
lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so +#LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so +#LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so +LoadModule unixd_module modules/mod_unixd.so +#LoadModule heartbeat_module modules/mod_heartbeat.so +#LoadModule heartmonitor_module modules/mod_heartmonitor.so +#LoadModule dav_module modules/mod_dav.so +LoadModule status_module modules/mod_status.so +LoadModule autoindex_module modules/mod_autoindex.so +#LoadModule asis_module modules/mod_asis.so +#LoadModule info_module modules/mod_info.so +#LoadModule suexec_module modules/mod_suexec.so + + #LoadModule cgid_module modules/mod_cgid.so + + + #LoadModule cgi_module modules/mod_cgi.so + +#LoadModule dav_fs_module modules/mod_dav_fs.so +#LoadModule dav_lock_module modules/mod_dav_lock.so +#LoadModule vhost_alias_module modules/mod_vhost_alias.so +#LoadModule negotiation_module modules/mod_negotiation.so +LoadModule dir_module modules/mod_dir.so +#LoadModule imagemap_module modules/mod_imagemap.so +#LoadModule actions_module modules/mod_actions.so +#LoadModule speling_module modules/mod_speling.so +LoadModule userdir_module modules/mod_userdir.so +LoadModule alias_module modules/mod_alias.so +#LoadModule rewrite_module modules/mod_rewrite.so + + +# +# If you wish httpd to run as a different user or group, you must run +# httpd as root initially and it will switch. +# +# User/Group: The name (or #number) of the user/group to run httpd as. +# It is usually good practice to create a dedicated user and group for +# running httpd, as with most system services. +# +User daemon +Group daemon + + + +# 'Main' server configuration +# +# The directives in this section set up the values used by the 'main' +# server, which responds to any requests that aren't handled by a +# definition. These values also provide defaults for +# any containers you may define later in the file. 
+# +# All of these directives may appear inside containers, +# in which case these default settings will be overridden for the +# virtual host being defined. +# + +# +# ServerAdmin: Your address, where problems with the server should be +# e-mailed. This address appears on some server-generated pages, such +# as error documents. e.g. admin@your-domain.com +# +ServerAdmin you@example.com + +# +# ServerName gives the name and port that the server uses to identify itself. +# This can often be determined automatically, but we recommend you specify +# it explicitly to prevent problems during startup. +# +# If your host doesn't have a registered DNS name, enter its IP address here. +# +#ServerName www.example.com:80 + +# +# Deny access to the entirety of your server's filesystem. You must +# explicitly permit access to web content directories in other +# blocks below. +# + + AllowOverride none + Require all denied + + +# +# Note that from this point forward you must specifically allow +# particular features to be enabled - so if something's not working as +# you might expect, make sure that you have specifically enabled it +# below. +# + +# +# DocumentRoot: The directory out of which you will serve your +# documents. By default, all requests are taken from this directory, but +# symbolic links and aliases may be used to point to other locations. +# +DocumentRoot "/usr/local/apache2/htdocs" + + # + # Possible values for the Options directive are "None", "All", + # or any combination of: + # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews + # + # Note that "MultiViews" must be named *explicitly* --- "Options All" + # doesn't give it to you. + # + # The Options directive is both complicated and important. Please see + # http://httpd.apache.org/docs/2.4/mod/core.html#options + # for more information. + # + Options Indexes FollowSymLinks + + # + # AllowOverride controls what directives may be placed in .htaccess files. 
+ # It can be "All", "None", or any combination of the keywords: + # AllowOverride FileInfo AuthConfig Limit + # + AllowOverride None + + # + # Controls who can get stuff from this server. + # + Require all granted + + +# +# DirectoryIndex: sets the file that Apache will serve if a directory +# is requested. +# + + DirectoryIndex index.html + + +# +# The following lines prevent .htaccess and .htpasswd files from being +# viewed by Web clients. +# + + Require all denied + + +# +# ErrorLog: The location of the error log file. +# If you do not specify an ErrorLog directive within a +# container, error messages relating to that virtual host will be +# logged here. If you *do* define an error logfile for a +# container, that host's errors will be logged there and not here. +# +ErrorLog /proc/self/fd/2 + +# +# LogLevel: Control the number of messages logged to the error_log. +# Possible values include: debug, info, notice, warn, error, crit, +# alert, emerg. +# +LogLevel warn + + + # + # The following directives define some format nicknames for use with + # a CustomLog directive (see below). + # + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + + # You need to enable mod_logio.c to use %I and %O + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + + + # + # The location and format of the access logfile (Common Logfile Format). + # If you do not define any access logfiles within a + # container, they will be logged here. Contrariwise, if you *do* + # define per- access logfiles, transactions will be + # logged therein and *not* in this file. + # + CustomLog /proc/self/fd/1 common + + # + # If you prefer a logfile with access, agent, and referer information + # (Combined Logfile Format) you can use the following directive. 
+ # + #CustomLog "logs/access_log" combined + + + + # + # Redirect: Allows you to tell clients about documents that used to + # exist in your server's namespace, but do not anymore. The client + # will make a new request for the document at its new location. + # Example: + # Redirect permanent /foo http://www.example.com/bar + + # + # Alias: Maps web paths into filesystem paths and is used to + # access content that does not live under the DocumentRoot. + # Example: + # Alias /webpath /full/filesystem/path + # + # If you include a trailing / on /webpath then the server will + # require it to be present in the URL. You will also likely + # need to provide a section to allow access to + # the filesystem path. + + # + # ScriptAlias: This controls which directories contain server scripts. + # ScriptAliases are essentially the same as Aliases, except that + # documents in the target directory are treated as applications and + # run by the server when requested rather than as documents sent to the + # client. The same rules about trailing "/" apply to ScriptAlias + # directives as to Alias. + # + ScriptAlias /cgi-bin/ "/usr/local/apache2/cgi-bin/" + + + + + # + # ScriptSock: On threaded servers, designate the path to the UNIX + # socket used to communicate with the CGI daemon of mod_cgid. + # + #Scriptsock cgisock + + +# +# "/usr/local/apache2/cgi-bin" should be changed to whatever your ScriptAliased +# CGI directory exists, if you have that configured. +# + + AllowOverride None + Options None + Require all granted + + + + # + # Avoid passing HTTP_PROXY environment to CGI's on this or any proxied + # backend servers which have lingering "httpoxy" defects. + # 'Proxy' request header is undefined by the IETF, not listed by IANA + # + RequestHeader unset Proxy early + + + + # + # TypesConfig points to the file containing the list of mappings from + # filename extension to MIME-type. 
+ # + TypesConfig conf/mime.types + + # + # AddType allows you to add to or override the MIME configuration + # file specified in TypesConfig for specific file types. + # + #AddType application/x-gzip .tgz + # + # AddEncoding allows you to have certain browsers uncompress + # information on the fly. Note: Not all browsers support this. + # + #AddEncoding x-compress .Z + #AddEncoding x-gzip .gz .tgz + # + # If the AddEncoding directives above are commented-out, then you + # probably should define those extensions to indicate media types: + # + AddType application/x-compress .Z + AddType application/x-gzip .gz .tgz + + # + # AddHandler allows you to map certain file extensions to "handlers": + # actions unrelated to filetype. These can be either built into the server + # or added with the Action directive (see below) + # + # To use CGI scripts outside of ScriptAliased directories: + # (You will also need to add "ExecCGI" to the "Options" directive.) + # + #AddHandler cgi-script .cgi + + # For type maps (negotiated resources): + #AddHandler type-map var + + # + # Filters allow you to process content before it is sent to the client. + # + # To parse .shtml files for server-side includes (SSI): + # (You will also need to add "Includes" to the "Options" directive.) + # + #AddType text/html .shtml + #AddOutputFilter INCLUDES .shtml + + +# +# The mod_mime_magic module allows the server to use various hints from the +# contents of the file itself to determine its type. The MIMEMagicFile +# directive tells the module where the hint definitions are located. +# +#MIMEMagicFile conf/magic + +# +# Customizable error responses come in three flavors: +# 1) plain text 2) local redirects 3) external redirects +# +# Some examples: +#ErrorDocument 500 "The server made a boo boo." 
+#ErrorDocument 404 /missing.html +#ErrorDocument 404 "/cgi-bin/missing_handler.pl" +#ErrorDocument 402 http://www.example.com/subscription_info.html +# + +# +# MaxRanges: Maximum number of Ranges in a request before +# returning the entire resource, or one of the special +# values 'default', 'none' or 'unlimited'. +# Default setting is to accept 200 Ranges. +#MaxRanges unlimited + +# +# EnableMMAP and EnableSendfile: On systems that support it, +# memory-mapping or the sendfile syscall may be used to deliver +# files. This usually improves server performance, but must +# be turned off when serving from networked-mounted +# filesystems or if support for these functions is otherwise +# broken on your system. +# Defaults: EnableMMAP On, EnableSendfile Off +# +#EnableMMAP off +#EnableSendfile on + +# Supplemental configuration +# +# The configuration files in the conf/extra/ directory can be +# included to add extra features or to modify the default configuration of +# the server, or you may simply copy their contents here and change as +# necessary. 
+ +# Server-pool management (MPM specific) +#Include conf/extra/httpd-mpm.conf + +# Multi-language error messages +#Include conf/extra/httpd-multilang-errordoc.conf + +# Fancy directory listings +#Include conf/extra/httpd-autoindex.conf + +# Language settings +#Include conf/extra/httpd-languages.conf + +# User home directories +#Include conf/extra/httpd-userdir.conf + +# Real-time info on requests and configuration +#Include conf/extra/httpd-info.conf + +# Virtual hosts +#Include conf/extra/httpd-vhosts.conf + +# Local access to the Apache HTTP Server Manual +#Include conf/extra/httpd-manual.conf + +# Distributed authoring and versioning (WebDAV) +#Include conf/extra/httpd-dav.conf + +# Various default settings +#Include conf/extra/httpd-default.conf + +# Configure mod_proxy_html to understand HTML4/XHTML1 + +Include conf/extra/proxy-html.conf + + +# Secure (SSL/TLS) connections +#Include conf/extra/httpd-ssl.conf +# +# Note: The following must must be present to support +# starting without SSL on platforms with no /dev/random equivalent +# but a statically compiled-in mod_ssl. +# + +SSLRandomSeed startup builtin +SSLRandomSeed connect builtin + + +#comets conf + +LoadModule auth_openidc_module modules/mod_auth_openidc.so +LoadModule rewrite_module modules/mod_rewrite.so + +ServerName localhost:80 + + CustomLog logs/access.log combined env=!dontlog + ErrorLog logs/error.log + + + SetHandler server-info + ProxyPass ! 
+ + +RewriteEngine On +ProxyRequests Off + +ProxyPass / http://{{queue_host}}:8000/ timeout=1800 +ProxyPassReverse / http://{{queue_host}}:8000/ + +OIDCProviderIssuer {{open_id_issuer}} +OIDCProviderAuthorizationEndpoint {{open_id_issuer}}/authorize +OIDCProviderTokenEndpoint {{open_id_issuer}}/oauth/token +OIDCProviderTokenEndpointAuth client_secret_post +OIDCProviderUserInfoEndpoint {{open_id_issuer}}/userinfo +OIDCClientID {{open_client_id}} +OIDCClientSecret {{open_client_secret}} +OIDCDefaultURL https://comets-analytics-{{env}}.org/public/timeout.html +OIDCProviderJwksUri {{open_id_issuer}}/.well-known/jwks.json +OIDCSessionMaxDuration 0 +OIDCScope "openid email family_name given_name app_metadata user_metadata user_id comets" +OIDCRedirectURI https://comets-analytics-{{env}}.org/auth0_redirect +OIDCCryptoPassphrase "{{open_passphrase}}" +OIDCCookiePath / + +OIDCProviderMetadataURL {{open_id_issuer}}/.well-known/openid-configuration +OIDCPassIDTokenAs payload +OIDCUserInfoRefreshInterval 5 +OIDCSessionInactivityTimeout 1800 + + + AuthType openid-connect + + Require claim comets:active + Require claim comets:admin + + Header echo ^OIDC_id_token_payload$ + Header set Cache-Control "no-cache, no-store, must-revalidate" + Header set Pragma "no-cache" + Header set Expires 0 + ErrorDocument 401 /errordocs/unauthorized.html + LogLevel debug + + + + + Require claim comets:admin + + ErrorDocument 401 /errordocs/index.html + + + + + Require claim "comets~^\w+$" + + Require claim comets:active + Require claim comets:admin + + + ErrorDocument 401 /errordocs/registration.html + + + + + Require valid-user + + Require claim "comets~^\w+$" + + + ErrorDocument 401 /errordocs/index.html + + + + Require all denied + + + + AuthType none + Require all granted + + + + ErrorDocument 401 ! 
+ + + diff --git a/comets/roles/deploy-comets-ecs/templates/settings.yml.j2 b/comets/roles/deploy-comets-ecs/templates/settings.yml.j2 new file mode 100644 index 000000000..6afae9e44 --- /dev/null +++ b/comets/roles/deploy-comets-ecs/templates/settings.yml.j2 @@ -0,0 +1,20 @@ +auth0: + token: "{{token}}" + domain: "ncicbiit" +email: + admin: [ "{{admin_email}}" ] + sender: "{{sender}}" + host: "{{host}}" + port: 465 + auth: true + username: "{{username}}" + password: "{{password}}" +queue: + host: "{{queue_host}}" + port: 61613 +s3: + bucket: "cbiit-tools-data" + username: "{{s3_username}}" + password: "{{s3_password}}" + input_folder: "comets-test/input/" + output_folder: "comets-test/output/" \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/tests/inventory b/comets/roles/deploy-comets-ecs/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/comets/roles/deploy-comets-ecs/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/comets/roles/deploy-comets-ecs/tests/test.yml b/comets/roles/deploy-comets-ecs/tests/test.yml new file mode 100644 index 000000000..d2ca8956f --- /dev/null +++ b/comets/roles/deploy-comets-ecs/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - deploy-comets \ No newline at end of file diff --git a/comets/roles/deploy-comets-ecs/vars/main.yml b/comets/roles/deploy-comets-ecs/vars/main.yml new file mode 100644 index 000000000..6dd13579e --- /dev/null +++ b/comets/roles/deploy-comets-ecs/vars/main.yml @@ -0,0 +1,23 @@ +--- +# vars file for deploy-comets +sender: "{{ lookup('aws_ssm', 'sender', region='us-east-1' ) }}" +host: "{{ lookup('aws_ssm', 'host', region='us-east-1' ) }}" +s3_password: "{{ lookup('aws_ssm', 's3_password', region='us-east-1' ) }}" +s3_username: "{{ lookup('aws_ssm', 's3_username', region='us-east-1' ) }}" +admin_email: "{{ lookup('aws_ssm', 'admin_email', region='us-east-1' ) }}" +username: "{{ lookup('aws_ssm', 'username', 
region='us-east-1' ) }}" +password: "{{ lookup('aws_ssm', 'password', region='us-east-1' ) }}" +token: "{{ lookup('aws_ssm', 'token', region='us-east-1' ) }}" +app_base_directory: /local/content/docker/comets +comets_repo: https://github.com/CBIIT/nci-webtools-comets-analytics +R_repo: https://github.com/CBIIT/R-cometsAnalytics +ecr: "{{ lookup('aws_ssm', 'ecr', region='us-east-1' ) }}" +version: 1.8.0 +env: "{{env}}" +region: us-east-1 + +#openid +open_id_issuer: "{{ lookup('aws_ssm', 'open_id_issuer', region='us-east-1' ) }}" +open_client_id: "{{ lookup('aws_ssm', 'open_client_id', region='us-east-1' ) }}" +open_client_secret: "{{ lookup('aws_ssm', 'open_client_secret', region='us-east-1' ) }}" +open_passphrase: "{{ lookup('aws_ssm', 'open_passphrase', region='us-east-1' ) }}" diff --git a/comets/roles/deploy-comets/README.md b/comets/roles/deploy-comets/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/comets/roles/deploy-comets/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/comets/roles/deploy-comets/defaults/main.yml b/comets/roles/deploy-comets/defaults/main.yml new file mode 100644 index 000000000..cc3ff9ffa --- /dev/null +++ b/comets/roles/deploy-comets/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for deploy-comets \ No newline at end of file diff --git a/comets/roles/deploy-comets/handlers/main.yml b/comets/roles/deploy-comets/handlers/main.yml new file mode 100644 index 000000000..b30592c1a --- /dev/null +++ b/comets/roles/deploy-comets/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for deploy-comets diff --git a/comets/roles/deploy-comets/meta/main.yml b/comets/roles/deploy-comets/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/comets/roles/deploy-comets/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/comets/roles/deploy-comets/tasks/build.yml b/comets/roles/deploy-comets/tasks/build.yml new file mode 100644 index 000000000..56e2fccff --- /dev/null +++ b/comets/roles/deploy-comets/tasks/build.yml @@ -0,0 +1,212 @@ +--- +- name: upgrade pip + pip: + name: pip + state: forcereinstall + +- name: install docker and docker-compose + pip: + name: "{{item}}" + state: present + loop: + - docker + - docker-compose + +#create app and logs directory +- name: create app and logs directory + file: + path: "{{app_base_directory}}/{{item}}" + state: directory + loop: + - app + - logs + +- name: gather comets-processor facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-{{env}}-processor + "instance-state-name": running + register: processor + + +- name: gather comets-app facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-{{env}}-app + "instance-state-name": running + register: app + +- name: set instance ip + set_fact: + queue_host: "{{ 
processor.instances[0].network_interfaces[0].private_ip_address }}" + app_host: "{{ app.instances[0].network_interfaces[0].private_ip_address }}" + +- name: Start service firewalld if not started + service: + name: firewalld + state: started + enabled: yes + +- name: open port 61613 + firewalld: + port: 61613/tcp + zone: public + permanent: yes + immediate: yes + state: enabled + when: + - queue_host == inventory_hostname + +- name: open 8000 + firewalld: + port: "{{item}}/tcp" + zone: public + permanent: yes + immediate: yes + state: enabled + loop: + - 8000 + - 80 + when: + - app_host == inventory_hostname + +- name: Check if comets directory exist + stat: + path: /tmp/comets + register: comets_result + +- name: remove old comets repo + file: + path: /tmp/comets + state: absent + register: delete_folder + # retries: 5 + # delay: 2 + # until: delete_folder is success + when: comets_result.stat.exists + +- name: Check if R-cometsAnalytics exists + stat: + path: /tmp/R-cometsAnalytics + register: comets_analytics + +- name: remove old r repo + file: + path: /tmp/R-cometsAnalytics + state: absent + register: delete_folder + # retries: 5 + # delay: 2 + # until: delete_folder is success + when: comets_analytics.stat.exists + +- name: checkout comets repo + git: + repo: "{{comets_repo}}" + dest: /tmp/comets + version: master + force: yes + +- name: checkout comets_analytics repo + git: + repo: "{{R_repo}}" + dest: /tmp/R-cometsAnalytics + version: comets_1.6.0_20191205 + force: yes + +# - name: copy R code +# synchronize: +# dest: /tmp/comets/comets/restricted/rcode/ +# src: /tmp/R-cometsAnalytics/ +# delete: yes +# recursive: true +# remote_src: yes +# rsync_opts: +# - "--exclude=.git" + +- name: copy R code + shell: rsync -iRcCvh --exclude=.git /tmp/R-cometsAnalytics/ /tmp/comets/comets/restricted/rcode + +- name: update settings + template: + dest: /tmp/comets/comets/restricted/settings.yml + src: settings.yml.j2 + +- name: copy docker-compse files + template: 
src={{item.src}} dest={{item.dest}} + with_items: + - { src: 'processor-docker-compose.yml.j2', dest: '{{app_base_directory}}/processor-docker-compose.yml' } + - { src: 'app-docker-compose.yml.j2', dest: '{{app_base_directory}}/app-docker-compose.yml' } + - { src: 'Dockerfile.j2', dest: '/tmp/comets/Dockerfile' } + - { src: 'comets-httpd.conf.j2', dest: '/tmp/comets/comets-httpd.conf' } + +# - name: login into ecr +# shell: "$(/usr/bin/aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {{ecr}})" +# ignore_errors: True + + +- name: build {{ecr}}/comets:app image + docker_image: + build: + path: "/tmp/comets" + dockerfile: "/tmp/comets/docker/comets.app.dockerfile" + pull: yes + nocache: yes + # name: "{{ecr}}/comets" + name: comets/app + # tag: "app-{{version}}" + # push: yes + source: build + when: + - app_host == inventory_hostname + +- name: build apache image + docker_image: + build: + path: "/tmp/comets" + pull: yes + nocache: yes + name: "comets/auth0" + # tag: "apache-{{version}}" + # push: yes + source: build + when: + - app_host == inventory_hostname + +- name: build {{ecr}}/comets:processor image + docker_image: + build: + path: "/tmp/comets" + dockerfile: "/tmp/comets/docker/comets.app.processor.dockerfile" + pull: yes + nocache: yes + name: "comets/processor" + # tag: "processor-{{version}}" + # push: yes + source: build + when: + - queue_host == inventory_hostname + +- name: start app + docker_compose: + project_src: "{{app_base_directory}}" + files: + - app-docker-compose.yml + state: present + recreate: always + when: + - app_host == inventory_hostname + +- name: start processor + docker_compose: + project_src: "{{app_base_directory}}" + files: + - processor-docker-compose.yml + state: present + recreate: always + when: + - queue_host == inventory_hostname diff --git a/comets/roles/deploy-comets/tasks/deploy.yml b/comets/roles/deploy-comets/tasks/deploy.yml new file mode 100644 index 000000000..dc6d22b46 --- 
/dev/null +++ b/comets/roles/deploy-comets/tasks/deploy.yml @@ -0,0 +1,11 @@ +- name: deploy docker + docker_compose: + project_name: comets + definition: + version: '3' + services: + app: + image: "{{ecr}}/comets:app-{{version}}" + activemq: + image: cbiitss/activemq:latest + \ No newline at end of file diff --git a/comets/roles/deploy-comets/tasks/main.yml b/comets/roles/deploy-comets/tasks/main.yml new file mode 100644 index 000000000..ff048c165 --- /dev/null +++ b/comets/roles/deploy-comets/tasks/main.yml @@ -0,0 +1,89 @@ +--- +# tasks file for deploy-comets + +- name: gather comets-app facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-frontend + "instance-state-name": running + register: frontend + +- name: set instance ip + set_fact: + queue_host: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +- name: Check if comets and exits + stat: + path: /tmp/comets + register: comets_result + +- name: remove old comets repo + file: + path: /tmp/comets + state: absent + when: comets_result.stat.exists + +- name: Check if R-cometsAnalytics exits + stat: + path: /tmp/R-cometsAnalytics + register: r_result + +- name: remove old r repo + file: + path: /tmp/R-cometsAnalytics + state: absent + when: r_result.stat.exists + +#create app and logs directory +- name: create app and logs directory + file: + path: "{{app_base_directory}}/{{item}}" + state: directory + loop: + - app + - logs + +- name: checkout comets repo + git: + repo: "{{comets_repo}}" + dest: /tmp/comets + version: master + force: yes + +- name: checkout R repo + git: + repo: "{{R_repo}}" + dest: /tmp/R-cometsAnalytics + version: comets_1.6.0_20191205 + force: yes + +- name: copy comets app + synchronize: + dest: /local/content/docker/comets/app/ + src: /tmp/comets/comets/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + - "--exclude=settings.yml" + +- name: copy R code + synchronize: + dest: 
/local/content/docker/comets/app/restricted/rcode/ + src: /tmp/R-cometsAnalytics/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + +- name: update settings + template: + dest: /local/content/docker/comets/app/restricted/settings.yml + src: settings.yml.j2 + +- name: restart docker + service: + name: docker + state: restarted \ No newline at end of file diff --git a/comets/roles/deploy-comets/templates/Dockerfile.j2 b/comets/roles/deploy-comets/templates/Dockerfile.j2 new file mode 100644 index 000000000..904c4b874 --- /dev/null +++ b/comets/roles/deploy-comets/templates/Dockerfile.j2 @@ -0,0 +1,11 @@ +FROM httpd:2.4.43 + +RUN apt-get update \ + && apt-get install -y apt-utils ca-certificates libapache2-mod-auth-openidc apache2-bin openssl \ + && ln -s /usr/lib/apache2/modules/mod_auth_openidc.so /usr/local/apache2/modules/mod_auth_openidc.so + +RUN echo "Include conf/extra/comets-vhosts.conf" >> /usr/local/apache2/conf/httpd.conf + +COPY ./comets-httpd.conf /usr/local/apache2/conf/extra/comets-vhosts.conf + + \ No newline at end of file diff --git a/comets/roles/deploy-comets/templates/app-docker-compose.yml.j2 b/comets/roles/deploy-comets/templates/app-docker-compose.yml.j2 new file mode 100644 index 000000000..9583bd8b7 --- /dev/null +++ b/comets/roles/deploy-comets/templates/app-docker-compose.yml.j2 @@ -0,0 +1,30 @@ +version: '3' +services: + auth0: + image: comets/auth0 + container_name: auth0 + ports: + - "80:80" + links: + - app + depends_on: + - app + app: + image: comets/app + container_name: app + restart: always + depends_on: + - activemq + links: + - activemq + ports: + - "8000:8000" + volumes: + - {{app_base_directory}}/logs:/deploy/logs + activemq: + image: "cbiitss/activemq:latest" + container_name: activemq + restart: always + ports: + - "8161:8161" + - "61613:61613" \ No newline at end of file diff --git a/comets/roles/deploy-comets/templates/comets-httpd.conf.j2 b/comets/roles/deploy-comets/templates/comets-httpd.conf.j2 new 
file mode 100644 index 000000000..d79127922 --- /dev/null +++ b/comets/roles/deploy-comets/templates/comets-httpd.conf.j2 @@ -0,0 +1,112 @@ + +#comets conf + + +LoadModule proxy_module modules/mod_proxy.so +LoadModule auth_openidc_module modules/mod_auth_openidc.so +LoadModule rewrite_module modules/mod_rewrite.so +LoadModule ssl_module modules/mod_ssl.so +LoadModule proxy_http_module modules/mod_proxy_http.so +LoadModule proxy_connect_module modules/mod_proxy_connect.so + + + + ServerName www.comets-analytics-{{env}}.org + ServerAlias comets-analytics-{{env}}.org + Loglevel INFO + + CustomLog logs/access.log combined env=!dontlog + ErrorLog logs/error.log + + + SetHandler server-info + ProxyPass ! + + + +RewriteEngine On +RewriteCond %{HTTP_HOST} !^www.comets-analytics-{{env}}.org$ +RewriteRule (.*) https://www.comets-analytics-{{env}}.org$1 [R=301,L] + + + +OIDCProviderIssuer {{open_id_issuer}} +OIDCProviderAuthorizationEndpoint {{open_id_issuer}}/authorize +OIDCProviderTokenEndpoint {{open_id_issuer}}/oauth/token +OIDCProviderTokenEndpointAuth client_secret_post +OIDCProviderUserInfoEndpoint {{open_id_issuer}}/userinfo +OIDCClientID {{open_client_id}} +OIDCClientSecret {{open_client_secret}} +OIDCDefaultURL https://comets-analytics-{{env}}.org/public/timeout.html +OIDCProviderJwksUri {{open_id_issuer}}/.well-known/jwks.json +OIDCSessionMaxDuration 0 +OIDCScope "openid email family_name given_name app_metadata user_metadata user_id comets" +OIDCRedirectURI https://comets-analytics-{{env}}.org/auth0_redirect +OIDCCryptoPassphrase "{{open_passphrase}}" +OIDCCookiePath / +OIDCProviderMetadataURL {{open_id_issuer}}/.well-known/openid-configuration +OIDCPassIDTokenAs payload +OIDCUserInfoRefreshInterval 5 +OIDCSessionInactivityTimeout 1800 + + + AuthType openid-connect + + Require claim comets:active + Require claim comets:admin + + Header echo ^OIDC_id_token_payload$ + Header set Cache-Control "no-cache, no-store, must-revalidate" + Header set Pragma "no-cache" + 
Header set Expires 0 + ErrorDocument 401 /errordocs/unauthorized.html + LogLevel debug + + + + + Require claim comets:admin + + ErrorDocument 401 /errordocs/index.html + + + + + Require claim "comets~^\w+$" + + Require claim comets:active + Require claim comets:admin + + + ErrorDocument 401 /errordocs/registration.html + + + + + Require valid-user + + Require claim "comets~^\w+$" + + + ErrorDocument 401 /errordocs/index.html + + + + Require all denied + + + + AuthType none + Require all granted + + + + ErrorDocument 401 ! + + +RewriteEngine On +ProxyRequests Off +ProxyPass / http://{{app_host}}:8000/ timeout=1800 +ProxyPassReverse / http://{{app_host}}:8000/ + + \ No newline at end of file diff --git a/comets/roles/deploy-comets/templates/processor-docker-compose.yml.j2 b/comets/roles/deploy-comets/templates/processor-docker-compose.yml.j2 new file mode 100644 index 000000000..5400e1f6e --- /dev/null +++ b/comets/roles/deploy-comets/templates/processor-docker-compose.yml.j2 @@ -0,0 +1,10 @@ +version: '3' +services: + processor: + image: comets/processor + container_name: processor + volumes: + - "{{app_base_directory}}/logs:/deploy/logs" + restart: always + ports: + - "61613:61613" \ No newline at end of file diff --git a/comets/roles/deploy-comets/templates/settings.yml.j2 b/comets/roles/deploy-comets/templates/settings.yml.j2 new file mode 100644 index 000000000..6afae9e44 --- /dev/null +++ b/comets/roles/deploy-comets/templates/settings.yml.j2 @@ -0,0 +1,20 @@ +auth0: + token: "{{token}}" + domain: "ncicbiit" +email: + admin: [ "{{admin_email}}" ] + sender: "{{sender}}" + host: "{{host}}" + port: 465 + auth: true + username: "{{username}}" + password: "{{password}}" +queue: + host: "{{queue_host}}" + port: 61613 +s3: + bucket: "cbiit-tools-data" + username: "{{s3_username}}" + password: "{{s3_password}}" + input_folder: "comets-test/input/" + output_folder: "comets-test/output/" \ No newline at end of file diff --git a/comets/roles/deploy-comets/tests/inventory 
b/comets/roles/deploy-comets/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/comets/roles/deploy-comets/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/comets/roles/deploy-comets/tests/test.yml b/comets/roles/deploy-comets/tests/test.yml new file mode 100644 index 000000000..d2ca8956f --- /dev/null +++ b/comets/roles/deploy-comets/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - deploy-comets \ No newline at end of file diff --git a/comets/roles/deploy-comets/vars/main.yml b/comets/roles/deploy-comets/vars/main.yml new file mode 100644 index 000000000..4bf395def --- /dev/null +++ b/comets/roles/deploy-comets/vars/main.yml @@ -0,0 +1,23 @@ +--- +# vars file for deploy-comets +sender: "{{ lookup('aws_ssm', 'sender', region='us-east-1' ) }}" +host: "{{ lookup('aws_ssm', 'host', region='us-east-1' ) }}" +s3_password: "{{ lookup('aws_ssm', 's3_password', region='us-east-1' ) }}" +s3_username: "{{ lookup('aws_ssm', 's3_username', region='us-east-1' ) }}" +admin_email: "{{ lookup('aws_ssm', 'admin_email', region='us-east-1' ) }}" +username: "{{ lookup('aws_ssm', 'username', region='us-east-1' ) }}" +password: "{{ lookup('aws_ssm', 'password', region='us-east-1' ) }}" +token: "{{ lookup('aws_ssm', 'token', region='us-east-1' ) }}" +app_base_directory: /local/content/docker/comets +comets_repo: https://github.com/CBIIT/nci-webtools-comets-analytics +R_repo: https://github.com/CBIIT/R-cometsAnalytics +ecr: "{{ lookup('aws_ssm', 'ecr', region='us-east-1' ) }}" +version: 1.9.0 +env: "{{env}}" +region: us-east-1 + +#openid +open_id_issuer: "{{ lookup('aws_ssm', 'open_id_issuer', region='us-east-1' ) }}" +open_client_id: "{{ lookup('aws_ssm', 'open_client_id', region='us-east-1' ) }}" +open_client_secret: "{{ lookup('aws_ssm', 'open_client_secret', region='us-east-1' ) }}" +open_passphrase: "{{ lookup('aws_ssm', 'open_passphrase', region='us-east-1' ) }}" diff --git 
a/comets/roles/docker/README.md b/comets/roles/docker/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/comets/roles/docker/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/comets/roles/docker/defaults/main.yml b/comets/roles/docker/defaults/main.yml new file mode 100644 index 000000000..c45677333 --- /dev/null +++ b/comets/roles/docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for docker \ No newline at end of file diff --git a/comets/roles/docker/handlers/main.yml b/comets/roles/docker/handlers/main.yml new file mode 100644 index 000000000..11684bcd3 --- /dev/null +++ b/comets/roles/docker/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for docker +- name: restart docker + service: + name: docker + state: restarted diff --git a/comets/roles/docker/meta/main.yml b/comets/roles/docker/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/comets/roles/docker/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/comets/roles/docker/tasks/main.yml b/comets/roles/docker/tasks/main.yml new file mode 100644 index 000000000..fdd19002d --- /dev/null +++ b/comets/roles/docker/tasks/main.yml @@ -0,0 +1,102 @@ +--- +# - name: Remove other Docker versions +# yum: +# name: +# - docker +# - docker-client +# - docker-client-latest +# - docker-common +# - docker-latest +# - docker-latest-logrotate +# - docker-logrotate +# - docker-engine +# - docker-compose +# state: absent + +# tasks file for docker +- name: install epel-release + yum: + name: + - epel-release +- name: install systems packages needed for docker + yum: + name: + - yum-utils + - device-mapper-persistent-data + - lvm2 + - python-setuptools + - firewalld + - python-pip + - docker-compose + state: installed + +- name: install docker python module + pip: + name: docker + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + tags: + - master + +- name: open tcp port 2375 and 2376 + firewalld: + state: enabled + permanent: yes + port: "{{item}}/tcp" + immediate: yes + zone: public + loop: + - 2375 + - 2376 + tags: + - master + +# - name: enable extra repos when running on red hat +# 
command: subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" +# when: ansible_distribution == 'Red Hat Enterprise Linux' + +- name: add docker repo + command: > + yum-config-manager --add-repo + https://download.docker.com/linux/centos/docker-ce.repo + +- name: install docker + yum: + name: ['docker-ce', 'docker-ce-cli', 'containerd.io'] + state: installed + +- name: enable and start docker + service: + name: docker + enabled: yes + state: restarted + +- name: create docker systemd options directory + file: + path: /etc/systemd/system/docker.service.d + state: directory + tags: + - master + +- name: configure docker startup options + template: + src: startup-options.conf.j2 + dest: /etc/systemd/system/docker.service.d/startup_options.conf + notify: + - restart docker + tags: + - master + +- name: reload systemctl daemon + systemd: + daemon_reload: yes + + + + + + diff --git a/comets/roles/docker/templates/startup-options.conf.j2 b/comets/roles/docker/templates/startup-options.conf.j2 new file mode 100644 index 000000000..afa83a0aa --- /dev/null +++ b/comets/roles/docker/templates/startup-options.conf.j2 @@ -0,0 +1,3 @@ +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375 \ No newline at end of file diff --git a/comets/roles/docker/tests/inventory b/comets/roles/docker/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/comets/roles/docker/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/comets/roles/docker/tests/test.yml b/comets/roles/docker/tests/test.yml new file mode 100644 index 000000000..2c81ca427 --- /dev/null +++ b/comets/roles/docker/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - docker \ No newline at end of file diff --git a/comets/roles/docker/vars/main.yml b/comets/roles/docker/vars/main.yml new file mode 100644 index 000000000..dc934ce9c --- /dev/null +++ b/comets/roles/docker/vars/main.yml @@ -0,0 +1,2 
@@ +--- +# vars file for docker \ No newline at end of file diff --git a/comets/roles/ecs-agent/README.md b/comets/roles/ecs-agent/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/comets/roles/ecs-agent/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/comets/roles/ecs-agent/defaults/main.yml b/comets/roles/ecs-agent/defaults/main.yml new file mode 100644 index 000000000..3bc56b749 --- /dev/null +++ b/comets/roles/ecs-agent/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ecs-agent \ No newline at end of file diff --git a/comets/roles/ecs-agent/files/docker-container@ecs-agent.service b/comets/roles/ecs-agent/files/docker-container@ecs-agent.service new file mode 100644 index 000000000..733f682bf --- /dev/null +++ b/comets/roles/ecs-agent/files/docker-container@ecs-agent.service @@ -0,0 +1,22 @@ +[Unit] +Description=Docker Container %I +Requires=docker.service +After=cloud-final.service + +[Service] +Restart=always +ExecStartPre=-/usr/bin/docker rm -f %i +ExecStart=/usr/bin/docker run --name %i \ +--privileged \ +--restart=on-failure:10 \ +--volume=/var/run:/var/run \ +--volume=/var/log/ecs/:/log:Z \ +--volume=/var/lib/ecs/data:/data:Z \ +--volume=/etc/ecs:/etc/ecs \ +--net=host \ +--env-file=/etc/ecs/ecs.config \ +amazon/amazon-ecs-agent:latest +ExecStop=/usr/bin/docker stop %i + +[Install] +WantedBy=default.target \ No newline at end of file diff --git a/comets/roles/ecs-agent/handlers/main.yml b/comets/roles/ecs-agent/handlers/main.yml new file mode 100644 index 000000000..502c66361 --- /dev/null +++ b/comets/roles/ecs-agent/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ecs-agent \ No newline at end of file diff --git a/comets/roles/ecs-agent/meta/main.yml b/comets/roles/ecs-agent/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/comets/roles/ecs-agent/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause 
(default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/comets/roles/ecs-agent/tasks/main.yml b/comets/roles/ecs-agent/tasks/main.yml new file mode 100644 index 000000000..8d3ea973f --- /dev/null +++ b/comets/roles/ecs-agent/tasks/main.yml @@ -0,0 +1,93 @@ +--- +# tasks file for ecs-agent +- name: gather instance facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Name": bento-frontend-{{env}} + "instance-state-name": running + "tag:Environment": "{{env}}" + register: frontend + +- name: set instance name + set_fact: + frontend_ip: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +#set local routing +- name: set localhost routing + sysctl: + name: net.ipv4.conf.all.route_localnet + value: '1' + sysctl_set: yes + state: present + reload: yes + +- name: install iptables + yum: + name: + - iptables-services + state: present + +- name: start iptables service + service: + name: iptables + state: started + enabled: yes + +- name: configure ecs-agent routing + iptables: + table: nat + chain: PREROUTING + protocol: tcp + destination: 169.254.170.2 + destination_port: '80' + jump: DNAT + to_destination: 127.0.0.1:51679 + comment: configure nat + +- name: configure ecs-agent redirect + iptables: + table: nat + chain: OUTPUT + protocol: tcp + match: tcp + destination: 169.254.170.2 + destination_port: '80' + jump: REDIRECT + to_ports: '51679' + comment: Redirect web traffic to port 51679 + +- name: save iptables + command: service iptables save + args: + warn: false + +- name: reload iptables + command: service iptables reload + args: + warn: false + +- name: create ecs directory + file: + path: "{{item}}" + state: directory + loop: + - "/etc/ecs" + - "/var/log/ecs" + - "/var/lib/ecs/data" + +- name: copy file ecs.config to /etc/ecs/ecs.config + template: + src: ecs.config.j2 + dest: /etc/ecs/ecs.config + +- name: copy docker service to systemd directory + copy: + src: docker-container@ecs-agent.service + dest: 
/etc/systemd/system/docker-container@ecs-agent.service + +- name: enable and start docker-container@ecs-agent.service + service: + name: docker-container@ecs-agent.service + state: started + enabled: yes \ No newline at end of file diff --git a/comets/roles/ecs-agent/templates/ecs.config.j2 b/comets/roles/ecs-agent/templates/ecs.config.j2 new file mode 100644 index 000000000..be588c842 --- /dev/null +++ b/comets/roles/ecs-agent/templates/ecs.config.j2 @@ -0,0 +1,12 @@ +ECS_DATADIR=/data +ECS_ENABLE_TASK_IAM_ROLE=true +ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true +ECS_LOGFILE=/log/ecs-agent.log +ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs"] +ECS_LOGLEVEL=info +ECS_CLUSTER={{ecs_cluster_name}} +{% if ansible_default_ipv4.address == frontend_ip %} +ECS_INSTANCE_ATTRIBUTES={"role": "frontend"} +{% else %} +ECS_INSTANCE_ATTRIBUTES={"role": "backend"} +{% endif %} diff --git a/comets/roles/ecs-agent/tests/inventory b/comets/roles/ecs-agent/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/comets/roles/ecs-agent/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/comets/roles/ecs-agent/tests/test.yml b/comets/roles/ecs-agent/tests/test.yml new file mode 100644 index 000000000..bd797d6fb --- /dev/null +++ b/comets/roles/ecs-agent/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ecs-agent \ No newline at end of file diff --git a/comets/roles/ecs-agent/vars/main.yml b/comets/roles/ecs-agent/vars/main.yml new file mode 100644 index 000000000..c73e942d3 --- /dev/null +++ b/comets/roles/ecs-agent/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ecs-agent \ No newline at end of file diff --git a/comets/setup-auth0.yml b/comets/setup-auth0.yml new file mode 100644 index 000000000..933d8b04e --- /dev/null +++ b/comets/setup-auth0.yml @@ -0,0 +1,11 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - auth0 + + \ No newline at end of 
file diff --git a/conf-files/docker/docker-compose-selenium.yml b/conf-files/docker/docker-compose-selenium.yml new file mode 100644 index 000000000..7236508f4 --- /dev/null +++ b/conf-files/docker/docker-compose-selenium.yml @@ -0,0 +1,19 @@ + +version: "3" +services: + selenium-hub: + image: selenium/hub:3.141.59-zinc + container_name: selenium-hub + ports: + - "4444:4444" + chrome: + image: selenium/node-chrome:3.141.59-zinc + volumes: + - /dev/shm:/dev/shm + depends_on: + - selenium-hub + environment: + - HUB_HOST=selenium-hub + - HUB_PORT=4444 + - NODE_MAX_INSTANCES=3 + - NODE_MAX_SESSIONS=3 \ No newline at end of file diff --git a/dashboards/newrelic/dashboards.json b/dashboards/newrelic/dashboards.json new file mode 100644 index 000000000..d8d0d7d03 --- /dev/null +++ b/dashboards/newrelic/dashboards.json @@ -0,0 +1,135 @@ +{ + "mydashboard": { + "metadata": { "version": 1 }, + "title": "API Widget Sample", + "icon":"none|archive|bar-chart|line-chart|bullseye|user|usd|money|thumbs-up|thumbs-down|cloud|bell|bullhorn|comments-o|envelope|globe|shopping-cart|sitemap|clock-o|crosshairs|rocket|users|mobile|tablet|adjust|dashboard|flag|flask|road|bolt|cog|leaf|magic|puzzle-piece|bug|fire|legal|trophy|pie-chart|sliders|paper-plane|life-ring|heart", + "visibility": "owner|all", + "editable": "read_only|editable_by_owner|editable_by_all", + "filter": { + "event_types": [ + "Transaction" + ], + "attributes": [ + "appName" + ] + }, + "widgets": [ + { + "visualization": "billboard|gauge|billboard_comparison", + "account_id": 2292606, + "data": [ + { + "nrql": "SELECT count(*) from Transaction since 5 minutes ago" + } + ], + "presentation": { + "title": "Threshold Event Chart", + "notes": null, + "threshold": { + "red": 18000000, + "yellow": 8000000 + } + }, + "layout": { + "width": 1, + "height": 1, + "row": 1, + "column": 1 + } + }, + { + "visualization": "facet_bar_chart|faceted_line_chart|facet_pie_chart|facet_table|faceted_area_chart|heatmap", + "account_id": 2292606, 
+ "data": [ + { + "nrql": "SELECT count(*) from Transaction since 5 minutes ago facet appName" + } + ], + "presentation": { + "title": "Facet Chart", + "notes": null, + "drilldown_dashboard_id": 64 + }, + "layout": { + "width": 1, + "height": 1, + "row": 1, + "column": 2 + } + }, + { + "visualization": "attribute_sheet|single_event|histogram|funnel|raw_json|event_feed|event_table|uniques_list|line_chart|comparison_line_chart", + "account_id": 2292606, + "data": [ + { + "nrql": "SELECT latest(appName), latest(duration) from Transaction since 5 minutes ago" + } + ], + "presentation": { + "title": "Simple Event Chart", + "notes": null + }, + "layout": { + "width": 1, + "height": 1, + "row": 1, + "column": 3 + } + }, + { + "visualization": "markdown", + "account_id": 2292606, + "data": [ + { + "source": "# Dashboard Note\n\n[link goes here](https://www.newrelic.com)" + } + ], + "presentation": { + "title": "", + "notes": null + }, + "layout": { + "width": 1, + "height": 1, + "row": 2, + "column": 1 + } + }, + { + "visualization": "metric_line_chart", + "account_id": 2292606, + "data": [ + { + "duration": 1800000, + "end_time": null, + "entity_ids": [ + 238575 + ], + "metrics": [ + { + "name": "Apdex", + "units": null, + "scope": "", + "values": [ + "score" + ] + } + ], + "order_by": "score", + "limit": 10 + } + ], + "presentation": { + "title": "Metric Line Chart", + "notes": null + }, + "layout": { + "width": 1, + "height": 1, + "row": 2, + "column": 2 + } + } + ] + } +} diff --git a/deploy-backend.yml b/deploy-backend.yml new file mode 100644 index 000000000..9d2b00c41 --- /dev/null +++ b/deploy-backend.yml @@ -0,0 +1,11 @@ +--- +- name: deploy stage of cicd pipeline + hosts: cicd + connection: local + gather_facts: no + + tasks: + - name: deploy stage + include_role: + name: cicd + tasks_from: deploy \ No newline at end of file diff --git a/docker/dockerfiles/backend-ctdc-dockerfile b/docker/dockerfiles/backend-ctdc-dockerfile new file mode 100644 index
000000000..c8a786397 --- /dev/null +++ b/docker/dockerfiles/backend-ctdc-dockerfile @@ -0,0 +1,6 @@ +FROM ncidockerhub.nci.nih.gov/icdc/bento-icdc-backend:release +MAINTAINER icdc devops team + +RUN rm -rf /usr/local/tomcat/webapps/ROOT +COPY target/ROOT.war /usr/local/tomcat/webapps/ + diff --git a/docker/dockerfiles/backend-dockerfile b/docker/dockerfiles/backend-dockerfile new file mode 100644 index 000000000..4c8455a0a --- /dev/null +++ b/docker/dockerfiles/backend-dockerfile @@ -0,0 +1,6 @@ +FROM cbiitssrepo/bento-backend:release +MAINTAINER icdc devops team + +RUN rm -rf /usr/local/tomcat/webapps/ROOT +COPY target/ROOT.war /usr/local/tomcat/webapps/ + diff --git a/docker/dockerfiles/backend-icdc-dockerfile b/docker/dockerfiles/backend-icdc-dockerfile new file mode 100644 index 000000000..c8a786397 --- /dev/null +++ b/docker/dockerfiles/backend-icdc-dockerfile @@ -0,0 +1,6 @@ +FROM ncidockerhub.nci.nih.gov/icdc/bento-icdc-backend:release +MAINTAINER icdc devops team + +RUN rm -rf /usr/local/tomcat/webapps/ROOT +COPY target/ROOT.war /usr/local/tomcat/webapps/ + diff --git a/docker/dockerfiles/ccdc-backend-dockerfile b/docker/dockerfiles/ccdc-backend-dockerfile new file mode 100644 index 000000000..339553bca --- /dev/null +++ b/docker/dockerfiles/ccdc-backend-dockerfile @@ -0,0 +1,18 @@ +FROM node:16-alpine3.11 + +ENV PORT 8080 +ENV NODE_ENV production + +WORKDIR /usr/src/app + +COPY package*.json ./ + +RUN npm ci --only=production + +#USER node + +COPY --chown=node:node . . 
+ +EXPOSE 8080 9200 3306 + +CMD [ "node", "app.js" ] \ No newline at end of file diff --git a/docker/dockerfiles/ccdc-database-dockerfile b/docker/dockerfiles/ccdc-database-dockerfile new file mode 100644 index 000000000..28eca2a2d --- /dev/null +++ b/docker/dockerfiles/ccdc-database-dockerfile @@ -0,0 +1,18 @@ +FROM node:16-alpine3.11 + +ENV PORT 8081 +ENV NODE_ENV production + +WORKDIR /usr/src/app + +COPY package*.json ./ + +RUN npm ci --only=production + +#USER node + +COPY --chown=node:node . . + +EXPOSE 8081 + +CMD [ "node", "index.js" ] \ No newline at end of file diff --git a/docker/dockerfiles/ccdc-frontend-dockerfile b/docker/dockerfiles/ccdc-frontend-dockerfile new file mode 100644 index 000000000..58e3a3de6 --- /dev/null +++ b/docker/dockerfiles/ccdc-frontend-dockerfile @@ -0,0 +1,16 @@ +FROM nginx:alpine + +COPY nginx.conf /etc/nginx/conf.d/configfile.template + +ENV PORT 80 + +ENV HOST 0.0.0.0 + +RUN sh -c "envsubst '\$PORT' < /etc/nginx/conf.d/configfile.template > /etc/nginx/conf.d/default.conf" + +COPY ./dist /usr/share/nginx/html +COPY ./nginx-entrypoint.sh / + +EXPOSE 80 + +ENTRYPOINT [ "sh", "/nginx-entrypoint.sh" ] \ No newline at end of file diff --git a/docker/dockerfiles/data-dictionary-dockerfile b/docker/dockerfiles/data-dictionary-dockerfile new file mode 100644 index 000000000..aa7410149 --- /dev/null +++ b/docker/dockerfiles/data-dictionary-dockerfile @@ -0,0 +1,16 @@ +FROM nginx:alpine + +COPY nginx.conf /etc/nginx/conf.d/configfile.template + +ENV PORT 81 + +ENV HOST 0.0.0.0 + +RUN sh -c "envsubst '\$PORT' < /etc/nginx/conf.d/configfile.template > /etc/nginx/conf.d/default.conf" + +COPY ./dist /usr/share/nginx/html +COPY ./nginx-entrypoint.sh / + +EXPOSE 81 + +ENTRYPOINT [ "sh", "/nginx-entrypoint.sh" ] \ No newline at end of file diff --git a/docker/dockerfiles/data-dictionary-nginx.conf b/docker/dockerfiles/data-dictionary-nginx.conf new file mode 100644 index 000000000..0acc70c31 --- /dev/null +++ 
b/docker/dockerfiles/data-dictionary-nginx.conf @@ -0,0 +1,38 @@ +server { + listen $PORT; + server_name localhost; + root /usr/share/nginx/html; + index index.html index.htm ; + location / { + try_files $uri $uri/ /index.html; + } + + location /nginx_status { + stub_status; + } + location = /data-dictionary { + + root /usr/share/nginx/html; + try_files /index.html =404; + } + + location ~ ^/data-dictionary(.*) { + + root /usr/share/nginx/html; + try_files $1 $1/ /index.html =404; + } + location ~* (serviceworker\.js)$ { + add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; + default_type application/octet-stream; + expires off; + proxy_no_cache 1; + } + error_page 404 /404.html; + gzip on; + gzip_vary on; + gzip_min_length 10240; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml; + gzip_disable "MSIE [1-6]\."; + +} \ No newline at end of file diff --git a/docker/dockerfiles/filedownload-dockerfile b/docker/dockerfiles/filedownload-dockerfile new file mode 100644 index 000000000..a1d95c98c --- /dev/null +++ b/docker/dockerfiles/filedownload-dockerfile @@ -0,0 +1,18 @@ +FROM node:16-alpine3.11 + +ENV PORT 8081 +ENV NODE_ENV production + +WORKDIR /usr/src/app + +COPY package*.json ./ + +RUN npm ci --only=production + +#USER node + +COPY --chown=node:node . . 
+ +EXPOSE 8081 + +CMD [ "node", "./bin/www" ] diff --git a/docker/dockerfiles/frontend-cloudrun-dockerfile b/docker/dockerfiles/frontend-cloudrun-dockerfile new file mode 100644 index 000000000..743fa5da4 --- /dev/null +++ b/docker/dockerfiles/frontend-cloudrun-dockerfile @@ -0,0 +1,18 @@ +FROM nginx:alpine + +COPY nginx.conf /etc/nginx/conf.d/configfile.template + +ENV PORT 8080 + +ENV HOST 0.0.0.0 + +RUN sh -c "envsubst '\$PORT' < /etc/nginx/conf.d/configfile.template > /etc/nginx/conf.d/default.conf" + +COPY ./dist /usr/share/nginx/html + +EXPOSE 8080 + +CMD ["nginx", "-g", "daemon off;"] + + + diff --git a/docker/dockerfiles/frontend-ctdc-dockerfile b/docker/dockerfiles/frontend-ctdc-dockerfile new file mode 100644 index 000000000..58e3a3de6 --- /dev/null +++ b/docker/dockerfiles/frontend-ctdc-dockerfile @@ -0,0 +1,16 @@ +FROM nginx:alpine + +COPY nginx.conf /etc/nginx/conf.d/configfile.template + +ENV PORT 80 + +ENV HOST 0.0.0.0 + +RUN sh -c "envsubst '\$PORT' < /etc/nginx/conf.d/configfile.template > /etc/nginx/conf.d/default.conf" + +COPY ./dist /usr/share/nginx/html +COPY ./nginx-entrypoint.sh / + +EXPOSE 80 + +ENTRYPOINT [ "sh", "/nginx-entrypoint.sh" ] \ No newline at end of file diff --git a/docker/dockerfiles/frontend-dockerfile b/docker/dockerfiles/frontend-dockerfile new file mode 100644 index 000000000..476151938 --- /dev/null +++ b/docker/dockerfiles/frontend-dockerfile @@ -0,0 +1,4 @@ +FROM nginx:alpine + +COPY ./dist /usr/share/nginx/html + diff --git a/docker/dockerfiles/frontend-icdc-dockerfile b/docker/dockerfiles/frontend-icdc-dockerfile new file mode 100644 index 000000000..58e3a3de6 --- /dev/null +++ b/docker/dockerfiles/frontend-icdc-dockerfile @@ -0,0 +1,16 @@ +FROM nginx:alpine + +COPY nginx.conf /etc/nginx/conf.d/configfile.template + +ENV PORT 80 + +ENV HOST 0.0.0.0 + +RUN sh -c "envsubst '\$PORT' < /etc/nginx/conf.d/configfile.template > /etc/nginx/conf.d/default.conf" + +COPY ./dist /usr/share/nginx/html +COPY ./nginx-entrypoint.sh / 
+ +EXPOSE 80 + +ENTRYPOINT [ "sh", "/nginx-entrypoint.sh" ] \ No newline at end of file diff --git a/docker/dockerfiles/frontend-ppdc-dockerfile b/docker/dockerfiles/frontend-ppdc-dockerfile new file mode 100644 index 000000000..58e3a3de6 --- /dev/null +++ b/docker/dockerfiles/frontend-ppdc-dockerfile @@ -0,0 +1,16 @@ +FROM nginx:alpine + +COPY nginx.conf /etc/nginx/conf.d/configfile.template + +ENV PORT 80 + +ENV HOST 0.0.0.0 + +RUN sh -c "envsubst '\$PORT' < /etc/nginx/conf.d/configfile.template > /etc/nginx/conf.d/default.conf" + +COPY ./dist /usr/share/nginx/html +COPY ./nginx-entrypoint.sh / + +EXPOSE 80 + +ENTRYPOINT [ "sh", "/nginx-entrypoint.sh" ] \ No newline at end of file diff --git a/docker/dockerfiles/github-actions-runner-dockerfile b/docker/dockerfiles/github-actions-runner-dockerfile new file mode 100644 index 000000000..7b37900ed --- /dev/null +++ b/docker/dockerfiles/github-actions-runner-dockerfile @@ -0,0 +1,31 @@ +FROM centos:7 + +ENV LANG=en_US.UTF-8 +ARG RUNNER_VERSION="2.283.3" + +RUN yum -y update && yum -y install epel-release +RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \ + && yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm \ + && yum -y install docker-ce docker-ce-cli containerd.io \ + && useradd -m bento \ + && usermod -aG docker bento + +RUN yum -y install jq python3 python3-pip git \ + && /usr/bin/pip3 install pip -U \ + && /usr/bin/pip3 install ansible==2.10.0 awscli jmespath docker boto botocore boto + +RUN yum clean all +RUN cd /home/bento && mkdir actions-runner && cd actions-runner \ + && curl -O -L https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz \ + && tar xzf ./actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz + +RUN chown -R bento /home/bento && /home/bento/actions-runner/bin/installdependencies.sh + + + +COPY start.sh start.sh + +RUN 
chmod +x start.sh && chown bento start.sh +USER bento + +ENTRYPOINT ["./start.sh"] \ No newline at end of file diff --git a/docker/dockerfiles/icdc-nginx.conf b/docker/dockerfiles/icdc-nginx.conf new file mode 100644 index 000000000..81b681466 --- /dev/null +++ b/docker/dockerfiles/icdc-nginx.conf @@ -0,0 +1,24 @@ +server { + listen $PORT; + server_name localhost; + root /usr/share/nginx/html; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + try_files $uri $uri/ /index.html; + proxy_set_header Host $host; + } + location /nginx_status { + stub_status; + } + + error_page 404 /404.html; + gzip on; + gzip_vary on; + gzip_min_length 10240; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml; + gzip_disable "MSIE [1-6]\."; + +} \ No newline at end of file diff --git a/docker/jenkins-agent b/docker/dockerfiles/jenkins-agent similarity index 100% rename from docker/jenkins-agent rename to docker/dockerfiles/jenkins-agent diff --git a/docker/dockerfiles/jenkins-agent-ansible b/docker/dockerfiles/jenkins-agent-ansible new file mode 100644 index 000000000..53282ab09 --- /dev/null +++ b/docker/dockerfiles/jenkins-agent-ansible @@ -0,0 +1,9 @@ +FROM vdonkor/jenkins-agent + +MAINTAINER vincent donkor + +USER root +RUN dnf -y install python3 python3-pip && pip3 install ansible +USER jenkins + + \ No newline at end of file diff --git a/docker/jenkins-agent-maven b/docker/dockerfiles/jenkins-agent-maven similarity index 95% rename from docker/jenkins-agent-maven rename to docker/dockerfiles/jenkins-agent-maven index 7d3161c52..8c758b9ae 100644 --- a/docker/jenkins-agent-maven +++ b/docker/dockerfiles/jenkins-agent-maven @@ -6,7 +6,7 @@ ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk ENV MAVEN_HOME /usr/local/maven ENV PATH $MAVEN_HOME/bin:$JAVA_HOME/bin:$PATH -ARG MAVEN_VERSION=3.6.1 +ARG MAVEN_VERSION=3.6.3 USER root RUN yum -y install java-11-openjdk-devel 
wget which \ && wget https://www-us.apache.org/dist/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz -P /tmp \ diff --git a/docker/dockerfiles/jenkins-agent-microservices b/docker/dockerfiles/jenkins-agent-microservices new file mode 100644 index 000000000..99a1a1f0c --- /dev/null +++ b/docker/dockerfiles/jenkins-agent-microservices @@ -0,0 +1,41 @@ +FROM vdonkor/jenkins-agent + +MAINTAINER vincent donkor + +ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk +ENV MAVEN_HOME /usr/local/maven +ENV PATH $MAVEN_HOME/bin:$JAVA_HOME/bin:$PATH +ENV NODE_VERSION=15.13.0 +ARG MAVEN_VERSION=3.6.3 +USER root +RUN yum -y update \ + && yum -y install epel-release \ + && yum -y install java-11-openjdk-devel wget which yum-utils device-mapper-persistent-data lvm2 redis\ + && wget https://www-us.apache.org/dist/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz -P /tmp \ + && tar xf /tmp/apache-maven-$MAVEN_VERSION-bin.tar.gz -C /usr/local \ + && ln -s /usr/local/apache-maven-$MAVEN_VERSION /usr/local/maven + +RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \ + && yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm \ + && yum -y install docker-ce docker-ce-cli containerd.io \ + && curl -sL https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo \ + && curl -sL https://rpm.nodesource.com/setup_15.x | bash - \ + && yum install -y nodejs \ + && npm install -g npm n\ + && npm install -g yarn\ + && n $NODE_VERSION \ + && usermod -aG docker jenkins + +RUN yum -y install jq python3 python3-pip \ + && /usr/bin/pip3 install pip -U \ + && /usr/bin/pip3 install ansible==2.9.0 awscli jmespath docker boto botocore boto3\ + && mkdir -p /var/cert +RUN yum clean all + + + + + + + + diff --git a/docker/dockerfiles/jenkins-agent-microservices-ccdc b/docker/dockerfiles/jenkins-agent-microservices-ccdc new file mode 100644 index 
000000000..649c45aab --- /dev/null +++ b/docker/dockerfiles/jenkins-agent-microservices-ccdc @@ -0,0 +1,36 @@ +FROM vdonkor/jenkins-agent + +MAINTAINER vincent donkor + +ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk +ENV NODE_VERSION=14.18.1 +USER root +RUN yum -y update \ + && yum -y install epel-release \ + && yum -y install java-11-openjdk-devel wget which yum-utils device-mapper-persistent-data lvm2 + +RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \ + && yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm \ + && yum -y install docker-ce docker-ce-cli containerd.io \ + && curl -sL https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo \ + && curl -sL https://rpm.nodesource.com/setup_15.x | bash - \ + && yum install -y nodejs \ + && npm install -g npm n\ + && npm install -g yarn\ + && n $NODE_VERSION \ + && curl --compressed -o- -L https://yarnpkg.com/install.sh | bash \ + && usermod -aG docker jenkins + +RUN yum -y install jq python3 python3-pip \ + && /usr/bin/pip3 install pip -U \ + && /usr/bin/pip3 install ansible==2.9.0 awscli jmespath docker boto botocore boto3\ + && mkdir -p /var/cert +RUN yum clean all + + + + + + + + diff --git a/docker/dockerfiles/jenkins-agent-microservices_ppdc b/docker/dockerfiles/jenkins-agent-microservices_ppdc new file mode 100644 index 000000000..87d72d14d --- /dev/null +++ b/docker/dockerfiles/jenkins-agent-microservices_ppdc @@ -0,0 +1,36 @@ +FROM vdonkor/jenkins-agent + +MAINTAINER vincent donkor + +ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk +ENV NODE_VERSION=14.17.0 +USER root +RUN yum -y update \ + && yum -y install epel-release \ + && yum -y install java-11-openjdk-devel wget which yum-utils device-mapper-persistent-data lvm2 + +RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \ + && yum install -y 
https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm \ + && yum -y install docker-ce docker-ce-cli containerd.io \ + && curl -sL https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo \ + && curl -sL https://rpm.nodesource.com/setup_15.x | bash - \ + && yum install -y nodejs \ + && npm install -g npm n\ + && npm install -g yarn\ + && n $NODE_VERSION \ + && curl --compressed -o- -L https://yarnpkg.com/install.sh | bash \ + && usermod -aG docker jenkins + +RUN yum -y install jq python3 python3-pip \ + && /usr/bin/pip3 install pip -U \ + && /usr/bin/pip3 install ansible==2.9.0 awscli jmespath docker boto botocore boto3\ + && mkdir -p /var/cert +RUN yum clean all + + + + + + + + diff --git a/docker/dockerfiles/jenkins-agent-scala b/docker/dockerfiles/jenkins-agent-scala new file mode 100644 index 000000000..a7548254e --- /dev/null +++ b/docker/dockerfiles/jenkins-agent-scala @@ -0,0 +1,31 @@ +FROM vdonkor/jenkins-agent + +MAINTAINER icdc devops + +ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk +ENV PATH $MAVEN_HOME/bin:$JAVA_HOME/bin:$PATH +ENV SCALA_VERSION=2.13.5 +USER root +RUN yum -y update \ + && yum -y install epel-release \ + && yum -y install java-11-openjdk-devel wget which yum-utils device-mapper-persistent-data lvm2\ + && yum -y install http://www.scala-lang.org/files/archive/scala-${SCALA_VERSION}.rpm \ + && curl https://bintray.com/sbt/rpm/rpm > /etc/yum.repos.d/sbt.repo + +RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \ + && yum install -y https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm \ + && yum -y install docker-ce docker-ce-cli containerd.io \ + && yum -y install sbt + +RUN yum -y install jq python3 python3-pip \ + && /usr/bin/pip3 install pip -U \ + && /usr/bin/pip3 install ansible==2.9.0 awscli jmespath docker +RUN yum clean all + + + + + + + + diff --git 
a/docker/dockerfiles/jenkins-agent-selenium b/docker/dockerfiles/jenkins-agent-selenium new file mode 100644 index 000000000..6f77befcf --- /dev/null +++ b/docker/dockerfiles/jenkins-agent-selenium @@ -0,0 +1,17 @@ +FROM vdonkor/cicd-maven + +MAINTAINER vincent donkor + +USER root + +RUN cd /tmp \ + && echo -e "[google-chrome]\nname=google-chrome\nbaseurl=http://dl.google.com/linux/chrome/rpm/stable/x86_64\nenabled=1\ngpgcheck=1\ngpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub" | tee /etc/yum.repos.d/chrome.repo \ + && yum -y install unzip google-chrome-stable which xorg-x11-server-Xvfb unzip which zip libxi6 libgconf-2-4 libXfont \ + && yum -y groupinstall "Gnome Desktop" \ + && wget -N "https://chromedriver.storage.googleapis.com/76.0.3809.68/chromedriver_linux64.zip" -P /tmp/ \ + && unzip /tmp/chromedriver_linux64.zip -d /usr/local/bin/ \ + && rm /tmp/chromedriver_linux64.zip \ + && chmod 755 /usr/local/bin/chromedriver \ + && chmod 777 /run/user/ + + \ No newline at end of file diff --git a/docker/dockerfiles/new-base-tomcat9 b/docker/dockerfiles/new-base-tomcat9 new file mode 100644 index 000000000..774c3af97 --- /dev/null +++ b/docker/dockerfiles/new-base-tomcat9 @@ -0,0 +1,37 @@ +FROM cbiitssrepo/centos_base7.7 + +MAINTAINER icdc devops + +ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk +ENV CATALINA_HOME /usr/local/tomcat +ENV PATH $CATALINA_HOME/bin:$JAVA_HOME/bin:$PATH +ENV TOMCAT_MAJOR 9 +ENV JAVA_OPTS $JAVA_OPTS -javaagent:/usr/local/tomcat/newrelic/newrelic.jar -XX:InitialRAMPercentage=25 -XX:MaxRAMPercentage=70 +ENV NEW_RELIC_LOG /usr/local/tomcat/newrelic/logs +ENV TOMCAT_VERSION 9.0.31 +ENV TOMCAT_USER tomcat +ENV TOMCAT_GROUP tomcat +ENV TOMCAT_URL http://apache.cs.utah.edu/tomcat/tomcat-${TOMCAT_MAJOR}/v${TOMCAT_VERSION}/bin/apache-tomcat-${TOMCAT_VERSION}.tar.gz + +WORKDIR $CATALINA_HOME + +RUN yum -y update +RUN yum -y install java-11-openjdk wget which unzip \ + && groupadd -g 3001 tomcat \ + && useradd -u 3001 -g 3001 ${TOMCAT_USER} \ + 
&& curl -O "http://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" \ + && unzip newrelic-java.zip \ + && mkdir newrelic/logs \ + && rm -rf newrelic-java.zip \ + && mkdir -p "$CATALINA_HOME" \ + && wget -O tomcat.tar.gz $TOMCAT_URL \ + && tar -xf tomcat.tar.gz --strip-components=1 -C $CATALINA_HOME \ + && rm -rf tomcat.tar.gz \ + && chown -R ${TOMCAT_USER}:${TOMCAT_GROUP} ${CATALINA_HOME} \ + && chmod +x ${CATALINA_HOME}/bin/*.sh \ + && yum -y clean all + +USER ${TOMCAT_USER} + +EXPOSE 8080 +CMD ["catalina.sh","run"] \ No newline at end of file diff --git a/docker/dockerfiles/nginx-entrypoint.sh b/docker/dockerfiles/nginx-entrypoint.sh new file mode 100755 index 000000000..70875490e --- /dev/null +++ b/docker/dockerfiles/nginx-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/injectEnv.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/docker/dockerfiles/nginx-ppdc.conf b/docker/dockerfiles/nginx-ppdc.conf new file mode 100644 index 000000000..326354b1c --- /dev/null +++ b/docker/dockerfiles/nginx-ppdc.conf @@ -0,0 +1,21 @@ +server { + listen $PORT; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + try_files $uri /index.html; + } + location /nginx_status { + stub_status; + } + error_page 404 /404.html; + gzip on; + gzip_vary on; + gzip_min_length 10240; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml; + gzip_disable "MSIE [1-6]\."; + +} \ No newline at end of file diff --git a/docker/dockerfiles/nginx.conf b/docker/dockerfiles/nginx.conf new file mode 100644 index 000000000..6ec70bb84 --- /dev/null +++ b/docker/dockerfiles/nginx.conf @@ -0,0 +1,20 @@ +server { + listen $PORT; + 
server_name localhost; + root /usr/share/nginx/html; + + location / { + try_files $uri $uri/ /index.html; + } + location /nginx_status { + stub_status; + } + error_page 404 /404.html; + gzip on; + gzip_vary on; + gzip_min_length 10240; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml; + gzip_disable "MSIE [1-6]\."; + +} \ No newline at end of file diff --git a/docker/dockerfiles/open-target-backend b/docker/dockerfiles/open-target-backend new file mode 100644 index 000000000..d6f647a2d --- /dev/null +++ b/docker/dockerfiles/open-target-backend @@ -0,0 +1,21 @@ +FROM alpine:latest + +ENV JAVA_HOME="/usr/lib/jvm/default-jvm/" +RUN apk add openjdk11 bash curl + +ENV PATH=$PATH:${JAVA_HOME}/bin +ENV JAVA_OPTS="$JAVA_OPTS -javaagent:/usr/local/tomcat/newrelic/newrelic.jar" + +RUN cd /tmp \ +&& curl -O https://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip \ +&& unzip /tmp/newrelic-java.zip \ +&& mkdir -p /usr/local/tomcat/newrelic \ +&& cp /tmp/newrelic/newrelic.jar /usr/local/tomcat/newrelic/newrelic.jar \ +&& cp /tmp/newrelic/newrelic.yml /usr/local/tomcat/newrelic/newrelic.yml \ +&& sed -i -e "s/license_key:.*/license_key: ${NEW_RELIC_LICENSE_KEY}/g" /usr/local/tomcat/newrelic/newrelic.yml \ +&& sed -i -e "s/app_name:.*/app_name: ${NEW_RELIC_APP_NAME}/g" /usr/local/tomcat/newrelic/newrelic.yml + + +COPY app /app +EXPOSE 8080 +CMD /app/bin/start -Dhttp.port="8080" -Dplay.http.secret.key="chslslkdangethisssksksaosometkdakdhingsecretkslakdd" -Dconfig.file=/app/conf/production.conf -Dlogger.file=/app/conf/production.xml -Dlog4j2.formatMsgNoLookups=true -Dnewrelic.config.app_name='PPDC_AWS_BACKEND_DEV' -J-javaagent:/usr/local/tomcat/newrelic/newrelic.jar diff --git a/docker/dockerfiles/ppdc-otp-entrypoint.sh b/docker/dockerfiles/ppdc-otp-entrypoint.sh new file mode 100755 index 000000000..39699dc7d --- /dev/null +++ 
b/docker/dockerfiles/ppdc-otp-entrypoint.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +WWW_DIR=/usr/share/nginx/html +INJECT_FILE_SRC="${WWW_DIR}/inject.template.js" +INJECT_FILE_DST="${WWW_DIR}/config.js" +envsubst < "${INJECT_FILE_SRC}" > "${INJECT_FILE_DST}" + +[ -z "$@" ] && nginx -g 'daemon off;' || $@ \ No newline at end of file diff --git a/docker/dockerfiles/reverseproxy-dockerfile b/docker/dockerfiles/reverseproxy-dockerfile new file mode 100644 index 000000000..f7b4a98ef --- /dev/null +++ b/docker/dockerfiles/reverseproxy-dockerfile @@ -0,0 +1,3 @@ +FROM nginx +RUN rm /etc/nginx/conf.d/default.conf +ADD ./default.conf /etc/nginx/conf.d/ diff --git a/docker/dockerfiles/sbt.repo b/docker/dockerfiles/sbt.repo new file mode 100644 index 000000000..482539714 --- /dev/null +++ b/docker/dockerfiles/sbt.repo @@ -0,0 +1,7 @@ +#bintray--sbt-rpm - packages by from Bintray +[bintray--sbt-rpm] +name=bintray--sbt-rpm +baseurl=https://sbt.bintray.com/rpm +gpgcheck=0 +repo_gpgcheck=0 +enabled=1 diff --git a/docker/dockerfiles/start.sh b/docker/dockerfiles/start.sh new file mode 100755 index 000000000..7468c9abf --- /dev/null +++ b/docker/dockerfiles/start.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +REPO=$REPO +TOKEN=$TOKEN + +cd /home/bento/actions-runner + +./config.sh --url https://github.com/CBIIT/${REPO} --token ${TOKEN} + +cleanup() { + echo "Removing runner..." + ./config.sh remove --unattended --token ${TOKEN} --name bento-runner-0 --labels bento-runner +} + +trap 'cleanup; exit 130' INT +trap 'cleanup; exit 143' TERM + +./run.sh & wait $!
\ No newline at end of file diff --git a/docker/tomcat9 b/docker/dockerfiles/tomcat9 similarity index 62% rename from docker/tomcat9 rename to docker/dockerfiles/tomcat9 index b8876afde..554235b6d 100644 --- a/docker/tomcat9 +++ b/docker/dockerfiles/tomcat9 @@ -1,4 +1,4 @@ -FROM centos:latest +FROM centos:7 MAINTAINER icdc devops @@ -6,18 +6,19 @@ ENV JAVA_HOME /usr/lib/jvm/jre-11-openjdk ENV CATALINA_HOME /usr/local/tomcat ENV PATH $CATALINA_HOME/bin:$JAVA_HOME/bin:$PATH ENV TOMCAT_MAJOR 9 -ENV JAVA_OPTS $JAVA_OPTS -javaagent:/usr/local/tomcat/newrelic/newrelic.jar +ENV JAVA_OPTS $JAVA_OPTS -javaagent:/usr/local/tomcat/newrelic/newrelic.jar -XX:InitialRAMPercentage=25 -XX:MaxRAMPercentage=70 ENV NEW_RELIC_LOG /usr/local/tomcat/newrelic/logs -ENV TOMCAT_VERSION 9.0.20 -ENV TOMCAT_USER tomcat +ENV TOMCAT_VERSION 9.0.31 +ENV TOMCAT_USER tomcat ENV TOMCAT_GROUP tomcat -ENV TOMCAT_URL http://apache.mirror.gtcomm.net/tomcat/tomcat-${TOMCAT_MAJOR}/v${TOMCAT_VERSION}/bin/apache-tomcat-${TOMCAT_VERSION}.tar.gz +ENV TOMCAT_URL http://apache.cs.utah.edu/tomcat/tomcat-${TOMCAT_MAJOR}/v${TOMCAT_VERSION}/bin/apache-tomcat-${TOMCAT_VERSION}.tar.gz WORKDIR $CATALINA_HOME +RUN yum -y update RUN yum -y install java-11-openjdk wget which unzip \ - && groupadd -g 46 tomcat \ - && useradd -u 46 -g 46 ${TOMCAT_USER} \ + && groupadd -g 3001 tomcat \ + && useradd -u 3001 -g 3001 ${TOMCAT_USER} \ && curl -O "http://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip" \ && unzip newrelic-java.zip \ && mkdir newrelic/logs \ @@ -27,7 +28,8 @@ RUN yum -y install java-11-openjdk wget which unzip \ && tar -xf tomcat.tar.gz --strip-components=1 -C $CATALINA_HOME \ && rm -rf tomcat.tar.gz \ && chown -R ${TOMCAT_USER}:${TOMCAT_GROUP} ${CATALINA_HOME} \ - && chmod +x ${CATALINA_HOME}/bin/*.sh + && chmod +x ${CATALINA_HOME}/bin/*.sh \ + && yum -y clean all USER ${TOMCAT_USER} diff --git a/docker/jenkins-agent-ansible b/docker/jenkins-agent-ansible deleted file mode 
100644 index 002f2f6c0..000000000 --- a/docker/jenkins-agent-ansible +++ /dev/null @@ -1,9 +0,0 @@ -FROM vdonkor/jenkins-agent - -MAINTAINER vincent donkor - -USER root -RUN yum install -y epel-release python-setuptools -RUN easy_install pip && pip install ansible - - \ No newline at end of file diff --git a/icrp/agents.yml b/icrp/agents.yml new file mode 100644 index 000000000..407974754 --- /dev/null +++ b/icrp/agents.yml @@ -0,0 +1,25 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: install newrelic + include_role: + name: newrelic + - name: define logs when app is ecs + set_fact: + additional_logs: + - name: "{{ env }} ECS Logs" + description: "{{ env }} ECS logs" + category: "{{ env }}/ecs/logs" + path: "/var/log/ecs/ecs-agent.log" + filters: "" + when: app == "ecs" + + - name: install sumologic + include_role: + name: sumologic + \ No newline at end of file diff --git a/icrp/ansible.cfg b/icrp/ansible.cfg new file mode 100644 index 000000000..dc7559a21 --- /dev/null +++ b/icrp/ansible.cfg @@ -0,0 +1,4 @@ +[defaults] +validate_certs = no +ansible_server_cert_validation = no +inventory = ./hosts \ No newline at end of file diff --git a/icrp/bento-data-loader.yml b/icrp/bento-data-loader.yml new file mode 100644 index 000000000..0f2b4fb3f --- /dev/null +++ b/icrp/bento-data-loader.yml @@ -0,0 +1,11 @@ +--- +- name: load data to neo4j db + hosts: all + connection: local + gather_facts: yes + + tasks: + - name: perform data loading + include_role: + name: data-loader + # tasks_from: icdc-data-loader \ No newline at end of file diff --git a/icrp/community-neo4j.yml b/icrp/community-neo4j.yml new file mode 100644 index 000000000..39aa16f79 --- /dev/null +++ b/icrp/community-neo4j.yml @@ -0,0 +1,11 @@ +--- +- name: setup neo4j database + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - community-neo4j + diff --git a/icrp/deploy-comets.yml 
b/icrp/deploy-comets.yml new file mode 100644 index 000000000..8ba773eab --- /dev/null +++ b/icrp/deploy-comets.yml @@ -0,0 +1,17 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: build comets + include_role: + name: deploy-comets + tasks_from: build + - name: deploy comets + include_role: + name: deploy-comets + tasks_from: deploy + diff --git a/icrp/deploy-custodian.yml b/icrp/deploy-custodian.yml new file mode 100644 index 000000000..37c65fdd5 --- /dev/null +++ b/icrp/deploy-custodian.yml @@ -0,0 +1,17 @@ +--- +- name: setup deploy custodian + hosts: all + connection: local + become: yes + gather_facts: yes + + tasks: + - name: build custodian + include_role: + name: deploy-custodian + tasks_from: build + - name: deploy custodian + include_role: + name: deploy-custodian + tasks_from: deploy + diff --git a/icrp/ecs-agent.yml b/icrp/ecs-agent.yml new file mode 100644 index 000000000..e1166b7a4 --- /dev/null +++ b/icrp/ecs-agent.yml @@ -0,0 +1,12 @@ +--- +- name: setup ecs agent + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - docker + - ecs-agent + \ No newline at end of file diff --git a/icrp/frontend.zip b/icrp/frontend.zip new file mode 100644 index 000000000..8a8727ef3 Binary files /dev/null and b/icrp/frontend.zip differ diff --git a/icrp/group_vars/all.yml b/icrp/group_vars/all.yml new file mode 100644 index 000000000..e69de29bb diff --git a/icrp/group_vars/k9dc.yml b/icrp/group_vars/k9dc.yml new file mode 100644 index 000000000..52fdacfb7 --- /dev/null +++ b/icrp/group_vars/k9dc.yml @@ -0,0 +1,7 @@ +collector_name: "{{ env }}-k9dc" +additional_logs: + - name: "{{ env }} k9dc Logs" + description: "{{ env }} k9dc logs" + category: "{{env }}/app/k9dc" + path: "/local/k9dc/logs/*.log" + filters: "" \ No newline at end of file diff --git a/icrp/group_vars/neo4j.yml b/icrp/group_vars/neo4j.yml new file mode 100644 index 000000000..de27a2986 --- 
/dev/null +++ b/icrp/group_vars/neo4j.yml @@ -0,0 +1,7 @@ +collector_name: "{{ env }}-neo4j" +additional_logs: + - name: "{{ env }} Neo4j Logs" + description: "{{ env }} neo4j logs" + category: "{{env }}/db/neo4j" + path: "/var/log/neo4j/*.log" + filters: "" \ No newline at end of file diff --git a/icrp/hosts b/icrp/hosts new file mode 100644 index 000000000..37f0e0f67 --- /dev/null +++ b/icrp/hosts @@ -0,0 +1,2 @@ +[all] +127.0.0.1 \ No newline at end of file diff --git a/icrp/jenkins.yml b/icrp/jenkins.yml new file mode 100644 index 000000000..6f81e9075 --- /dev/null +++ b/icrp/jenkins.yml @@ -0,0 +1,13 @@ +--- +- name: setup jenkins + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - docker + - jenkins + + diff --git a/icrp/neo4j.yml b/icrp/neo4j.yml new file mode 100644 index 000000000..61398c2e0 --- /dev/null +++ b/icrp/neo4j.yml @@ -0,0 +1,11 @@ +--- +- name: setup neo4j database + hosts: all + connection: local + become: yes + gather_facts: yes + + roles: + - common + - neo4j + \ No newline at end of file diff --git a/icrp/roles/common/README.md b/icrp/roles/common/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/common/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/icrp/roles/common/defaults/main.yml b/icrp/roles/common/defaults/main.yml new file mode 100644 index 000000000..fa3055099 --- /dev/null +++ b/icrp/roles/common/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for common \ No newline at end of file diff --git a/icrp/roles/common/handlers/main.yml b/icrp/roles/common/handlers/main.yml new file mode 100644 index 000000000..c6a8f0c7b --- /dev/null +++ b/icrp/roles/common/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for common \ No newline at end of file diff --git a/icrp/roles/common/meta/main.yml b/icrp/roles/common/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/common/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. 
+ # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/icrp/roles/common/tasks/main.yml b/icrp/roles/common/tasks/main.yml new file mode 100644 index 000000000..a97f88f47 --- /dev/null +++ b/icrp/roles/common/tasks/main.yml @@ -0,0 +1,10 @@ +--- +# tasks file for common +- name: Set timezone to America/New_York + timezone: + name: America/New_York + +# - name: set hostname +# hostname: +# name: "{{ hostvars[inventory_hostname].group_names[0] }}-{{ env }}" + \ No newline at end of file diff --git a/icrp/roles/common/tests/inventory b/icrp/roles/common/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/common/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/common/tests/test.yml b/icrp/roles/common/tests/test.yml new file mode 100644 index 000000000..8d24282da --- /dev/null +++ b/icrp/roles/common/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - common \ No newline at end of file diff --git a/icrp/roles/common/vars/main.yml b/icrp/roles/common/vars/main.yml new file mode 100644 index 000000000..feaa92f9b --- /dev/null +++ b/icrp/roles/common/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for common \ No newline at end of file diff --git a/icrp/roles/community-neo4j/README.md b/icrp/roles/community-neo4j/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/community-neo4j/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/community-neo4j/defaults/main.yml b/icrp/roles/community-neo4j/defaults/main.yml new file mode 100644 index 000000000..797e2b4c3 --- /dev/null +++ b/icrp/roles/community-neo4j/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for neo4j +newrelic: yes +neo4j_home: /var/lib/neo4j +neo4j_version: 3.5.21 +graphql_version: 3.5.15.5 diff --git a/icrp/roles/community-neo4j/handlers/main.yml b/icrp/roles/community-neo4j/handlers/main.yml new file mode 100644 index 000000000..0168d6d56 --- /dev/null +++ b/icrp/roles/community-neo4j/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for neo4j +- name: restart_neo4j + service: + name: neo4j + state: restarted + +- name: restart_firewalld + service: + name: firewalld + state: started + enabled: yes diff --git a/icrp/roles/community-neo4j/meta/main.yml b/icrp/roles/community-neo4j/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/community-neo4j/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. 
+ #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/icrp/roles/community-neo4j/tasks/main.yml b/icrp/roles/community-neo4j/tasks/main.yml new file mode 100644 index 000000000..5a0797101 --- /dev/null +++ b/icrp/roles/community-neo4j/tasks/main.yml @@ -0,0 +1,114 @@ +--- +# tasks file for neo4j +- name: install systems packages + yum: + name: + - firewalld + - epel-release + - unzip + - wget + - java-11-openjdk-devel + state: latest + disable_gpg_check: yes + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + +- name: create neo4j group + group: + name: neo4j + state: present + +- name: create neo4j user + user: + name: neo4j + groups: neo4j + append: yes + +- name: create {{neo4j_home}} + file: + path: "{{neo4j_home}}" + state: directory + +- name: download neo4j tar ball + get_url: + url: https://dist.neo4j.org/neo4j-community-{{neo4j_version}}-unix.tar.gz + dest: /tmp/neo4j-community.tar.gz + +- name: extract neo4j + unarchive: + src: /tmp/neo4j-community.tar.gz + dest: 
"{{neo4j_home}}" + remote_src: yes + extra_opts: [--strip-components=1] + +- name: change permission on /var/lib/neo4j/data + file: + path: "{{neo4j_home}}/{{item}}" + owner: neo4j + group: neo4j + recurse: yes + mode: '777' + loop: + - data + - logs + +- name: add DefaultLimitNOFILE=60000 to /etc/systemd/user.conf + lineinfile: + path: /etc/systemd/user.conf + line: 'DefaultLimitNOFILE=60000' + regex: '#DefaultLimitNOFILE=' + state: present + +- name: copy neo4j.conf and service "{{neo4j_home}}" + template: + src: "{{item.src}}" + dest: "{{item.dest}}" + owner: neo4j + group: neo4j + loop: + - { src: 'neo4j.conf.j2', dest: "{{neo4j_home}}/conf/neo4j.conf" } + - { src: 'neo4j.service.j2', dest: "/usr/lib/systemd/system/neo4j.service" } + notify: restart_neo4j + +- name: change ownership of /var/lib/neo4j + file: + path: "{{neo4j_home}}" + owner: neo4j + group: neo4j + recurse: yes + +- name: reload system daemon + systemd: + daemon_reload: yes + +- name: start and enable neo4j service + service: + name: neo4j + state: started + enabled: yes + +- name: open neo4j data ports + firewalld: + port: "{{item}}/tcp" + zone: public + immediate: yes + permanent: yes + state: enabled + loop: + - 7474 + - 7473 + - 7687 + notify: restart_firewalld + +- name: download neo4j-graphql plugins + get_url: + url: https://github.com/neo4j-graphql/neo4j-graphql/releases/download/{{graphql_version}}/neo4j-graphql-{{graphql_version}}.jar + dest: "{{neo4j_home}}/plugins" + owner: neo4j + group: neo4j + + diff --git a/icrp/roles/community-neo4j/templates/neo4j.conf.j2 b/icrp/roles/community-neo4j/templates/neo4j.conf.j2 new file mode 100644 index 000000000..cf8e84b85 --- /dev/null +++ b/icrp/roles/community-neo4j/templates/neo4j.conf.j2 @@ -0,0 +1,360 @@ +#***************************************************************** +# Neo4j configuration +# +# For more details and a complete list of settings, please see +# https://neo4j.com/docs/operations-manual/3.5/reference/configuration-settings/ 
+#***************************************************************** + +# The name of the database to mount +#dbms.active_database=graph.db + +# Paths of directories in the installation. +#dbms.directories.data=data +#dbms.directories.plugins=plugins +#dbms.directories.certificates=certificates +#dbms.directories.logs=logs +#dbms.directories.lib=lib +#dbms.directories.run=run + +# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to +# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the +# `LOAD CSV` section of the manual for details. +dbms.directories.import=import + +# Whether requests to Neo4j are authenticated. +# To disable authentication, uncomment this line +#dbms.security.auth_enabled=false + +# Enable this to be able to upgrade a store from an older version. +#dbms.allow_upgrade=true + +# Java Heap Size: by default the Java heap size is dynamically +# calculated based on available system resources. +# Uncomment these lines to set specific initial and maximum +# heap size. +dbms.memory.heap.initial_size=512m +dbms.memory.heap.max_size=512m + +# The amount of memory to use for mapping the store files, in bytes (or +# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g'). +# If Neo4j is running on a dedicated server, then it is generally recommended +# to leave about 2-4 gigabytes for the operating system, give the JVM enough +# heap to hold all your transaction state and query context, and then leave the +# rest for the page cache. +# The default page cache memory assumes the machine is dedicated to running +# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size. 
+# dbms.memory.pagecache.size=10g + +#***************************************************************** +# Network connector configuration +#***************************************************************** + +# With default configuration Neo4j only accepts local connections. +# To accept non-local connections, uncomment this line: +dbms.connectors.default_listen_address=0.0.0.0 + +# You can also choose a specific network interface, and configure a non-default +# port for each connector, by setting their individual listen_address. + +# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or +# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for +# individual connectors below. +#dbms.connectors.default_advertised_address=localhost + +# You can also choose a specific advertised hostname or IP address, and +# configure an advertised port for each connector, by setting their +# individual advertised_address. + +# Bolt connector +dbms.connector.bolt.enabled=true +#dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=:7687 + +# HTTP Connector. There can be zero or one HTTP connectors. +dbms.connector.http.enabled=true +#dbms.connector.http.listen_address=:7474 + +# HTTPS Connector. There can be zero or one HTTPS connectors. +dbms.connector.https.enabled=true +#dbms.connector.https.listen_address=:7473 + +# Number of Neo4j worker threads. +#dbms.threads.worker_count= + +#***************************************************************** +# SSL system configuration +#***************************************************************** + +# Names of the SSL policies to be used for the respective components. + +# The legacy policy is a special policy which is not defined in +# the policy configuration section, but rather derives from +# dbms.directories.certificates and associated files +# (by default: neo4j.key and neo4j.cert). 
Its use will be deprecated. + +# The policies to be used for connectors. +# +# N.B: Note that a connector must be configured to support/require +# SSL/TLS for the policy to actually be utilized. +# +# see: dbms.connector.*.tls_level + +#bolt.ssl_policy=legacy +#https.ssl_policy=legacy + +#***************************************************************** +# SSL policy configuration +#***************************************************************** + +# Each policy is configured under a separate namespace, e.g. +# dbms.ssl.policy..* +# +# The example settings below are for a new policy named 'default'. + +# The base directory for cryptographic objects. Each policy will by +# default look for its associated objects (keys, certificates, ...) +# under the base directory. +# +# Every such setting can be overridden using a full path to +# the respective object, but every policy will by default look +# for cryptographic objects in its base location. +# +# Mandatory setting + +#dbms.ssl.policy.default.base_directory=certificates/default + +# Allows the generation of a fresh private key and a self-signed +# certificate if none are found in the expected locations. It is +# recommended to turn this off again after keys have been generated. +# +# Keys should in general be generated and distributed offline +# by a trusted certificate authority (CA) and not by utilizing +# this mode. + +#dbms.ssl.policy.default.allow_key_generation=false + +# Enabling this makes it so that this policy ignores the contents +# of the trusted_dir and simply resorts to trusting everything. +# +# Use of this mode is discouraged. It would offer encryption but no security. + +#dbms.ssl.policy.default.trust_all=false + +# The private key for the default SSL policy. By default a file +# named private.key is expected under the base directory of the policy. +# It is mandatory that a key can be found or generated. + +#dbms.ssl.policy.default.private_key= + +# The private key for the default SSL policy. 
By default a file +# named public.crt is expected under the base directory of the policy. +# It is mandatory that a certificate can be found or generated. + +#dbms.ssl.policy.default.public_certificate= + +# The certificates of trusted parties. By default a directory named +# 'trusted' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). +# +# To enforce client authentication client_auth must be set to 'require'! + +#dbms.ssl.policy.default.trusted_dir= + +# Client authentication setting. Values: none, optional, require +# The default is to require client authentication. +# +# Servers are always authenticated unless explicitly overridden +# using the trust_all setting. In a mutual authentication setup this +# should be kept at the default of require and trusted certificates +# must be installed in the trusted_dir. + +#dbms.ssl.policy.default.client_auth=require + +# It is possible to verify the hostname that the client uses +# to connect to the remote server. In order for this to work, the server public +# certificate must have a valid CN and/or matching Subject Alternative Names. + +# Note that this is irrelevant on host side connections (sockets receiving +# connections). + +# To enable hostname verification client side on nodes, set this to true. + +#dbms.ssl.policy.default.verify_hostname=false + +# A comma-separated list of allowed TLS versions. +# By default only TLSv1.2 is allowed. + +#dbms.ssl.policy.default.tls_versions= + +# A comma-separated list of allowed ciphers. +# The default ciphers are the defaults of the JVM platform. + +#dbms.ssl.policy.default.ciphers= + +#***************************************************************** +# Logging configuration +#***************************************************************** + +# To enable HTTP logging, uncomment this line +#dbms.logs.http.enabled=true + +# Number of HTTP logs to keep. 
+#dbms.logs.http.rotation.keep_number=5 + +# Size of each HTTP log that is kept. +#dbms.logs.http.rotation.size=20m + +# To enable GC Logging, uncomment this line +#dbms.logs.gc.enabled=true + +# GC Logging Options +# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information. +#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution + +# For Java 9 and newer GC Logging Options +# see https://docs.oracle.com/javase/10/tools/java.htm#JSWOR-GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5 +#dbms.logs.gc.options=-Xlog:gc*,safepoint,age*=trace + +# Number of GC logs to keep. +#dbms.logs.gc.rotation.keep_number=5 + +# Size of each GC log that is kept. +#dbms.logs.gc.rotation.size=20m + +# Log level for the debug log. One of DEBUG, INFO, WARN and ERROR. Be aware that logging at DEBUG level can be very verbose. +#dbms.logs.debug.level=INFO + +# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k", +# "m" or "g". +#dbms.logs.debug.rotation.size=20m + +# Maximum number of history files for the internal log. +#dbms.logs.debug.rotation.keep_number=7 + +#***************************************************************** +# Miscellaneous configuration +#***************************************************************** + +# Enable this to specify a parser other than the default one. +#cypher.default_language_version=2.3 + +# Determines if Cypher will allow using file URLs when loading data using +# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV` +# clauses that load data from the file system. +#dbms.security.allow_csv_import_from_file_urls=true + + +# Value of the Access-Control-Allow-Origin header sent over any HTTP or HTTPS +# connector. This defaults to '*', which allows broadest compatibility. 
Note +# that any URI provided here limits HTTP/HTTPS access to that URI only. +#dbms.security.http_access_control_allow_origin=* + +# Value of the HTTP Strict-Transport-Security (HSTS) response header. This header +# tells browsers that a webpage should only be accessed using HTTPS instead of HTTP. +# It is attached to every HTTPS response. Setting is not set by default so +# 'Strict-Transport-Security' header is not sent. Value is expected to contain +# directives like 'max-age', 'includeSubDomains' and 'preload'. +#dbms.security.http_strict_transport_security= + +# Retention policy for transaction logs needed to perform recovery and backups. +dbms.tx_log.rotation.retention_policy=1 days + +# Only allow read operations from this Neo4j instance. This mode still requires +# write access to the directory for lock purposes. +#dbms.read_only=false + +# Comma separated list of JAX-RS packages containing JAX-RS resources, one +# package name for each mountpoint. The listed package names will be loaded +# under the mountpoints specified. Uncomment this line to mount the +# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from +# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of +# http://localhost:7474/examples/unmanaged/helloworld/{nodeId} +#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged + +# A comma separated list of procedures and user defined functions that are allowed +# full access to the database through unsupported/insecure internal APIs. +#dbms.security.procedures.unrestricted=my.extensions.example,my.procedures.* + +# A comma separated list of procedures to be loaded by default. +# Leaving this unconfigured will load all procedures found. 
+#dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.* + +#******************************************************************** +# JVM Parameters +#******************************************************************** + +# G1GC generally strikes a good balance between throughput and tail +# latency, without too much tuning. +dbms.jvm.additional=-XX:+UseG1GC + +# Have common exceptions keep producing stack traces, so they can be +# debugged regardless of how often logs are rotated. +dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow + +# Make sure that `initmemory` is not only allocated, but committed to +# the process, before starting the database. This reduces memory +# fragmentation, increasing the effectiveness of transparent huge +# pages. It also reduces the possibility of seeing performance drop +# due to heap-growing GC events, where a decrease in available page +# cache leads to an increase in mean IO response time. +# Try reducing the heap memory, if this flag degrades performance. +dbms.jvm.additional=-XX:+AlwaysPreTouch + +# Trust that non-static final fields are really final. +# This allows more optimizations and improves overall performance. +# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or +# serialization to change the value of final fields! +dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions +dbms.jvm.additional=-XX:+TrustFinalNonStaticFields + +# Disable explicit garbage collection, which is occasionally invoked by the JDK itself. +dbms.jvm.additional=-XX:+DisableExplicitGC + +# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and +# jmx.password files are required. +# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords, +# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'. 
+# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html +# On Unix based systems the jmx.password file needs to be owned by the user that will run the server, +# and have permissions set to 0600. +# For details on setting these file permissions on Windows see: +# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637 +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access + +# Some systems cannot discover host name automatically, and need this line configured: +#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME + +# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes. +# This is to protect the server from any potential passive eavesdropping. +dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048 + +# This mitigates a DDoS vector. +dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +# This filter prevents deserialization of arbitrary objects via java object serialization, addressing potential vulnerabilities. +# By default this filter whitelists all neo4j classes, as well as classes from the hazelcast library and the java standard library. +# These defaults should only be modified by expert users! 
+# For more details (including filter syntax) see: https://openjdk.java.net/jeps/290 +#dbms.jvm.additional=-Djdk.serialFilter=java.**;org.neo4j.**;com.neo4j.**;com.hazelcast.**;net.sf.ehcache.Element;com.sun.proxy.*;org.openjdk.jmh.**;!* + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. +# Please uninstall the service before modifying this section. The +# service can then be reinstalled. + +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=tarball +dbms.unmanaged_extension_classes=org.neo4j.graphql=/graphql diff --git a/icrp/roles/community-neo4j/templates/neo4j.service.j2 b/icrp/roles/community-neo4j/templates/neo4j.service.j2 new file mode 100644 index 000000000..a829bd81a --- /dev/null +++ b/icrp/roles/community-neo4j/templates/neo4j.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=Neo4j Graph Database +After=network-online.target +Wants=network-online.target + +[Service] +ExecStart=/var/lib/neo4j/bin/neo4j console +Restart=on-failure +User=neo4j +Group=neo4j +Environment="NEO4J_CONF=/var/lib/neo4j/conf" "NEO4J_HOME=/var/lib/neo4j" +LimitNOFILE=60000 +TimeoutSec=120 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/icrp/roles/community-neo4j/tests/inventory b/icrp/roles/community-neo4j/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/community-neo4j/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/community-neo4j/tests/test.yml 
b/icrp/roles/community-neo4j/tests/test.yml new file mode 100644 index 000000000..ba5c658c4 --- /dev/null +++ b/icrp/roles/community-neo4j/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - neo4j \ No newline at end of file diff --git a/icrp/roles/community-neo4j/vars/main.yml b/icrp/roles/community-neo4j/vars/main.yml new file mode 100644 index 000000000..b531af7dd --- /dev/null +++ b/icrp/roles/community-neo4j/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for neo4j diff --git a/icrp/roles/data-loader/README.md b/icrp/roles/data-loader/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/data-loader/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/icrp/roles/data-loader/defaults/main.yml b/icrp/roles/data-loader/defaults/main.yml new file mode 100644 index 000000000..817639564 --- /dev/null +++ b/icrp/roles/data-loader/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for data-processing \ No newline at end of file diff --git a/icrp/roles/data-loader/handlers/main.yml b/icrp/roles/data-loader/handlers/main.yml new file mode 100644 index 000000000..6d97e77fa --- /dev/null +++ b/icrp/roles/data-loader/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for data-processing \ No newline at end of file diff --git a/icrp/roles/data-loader/meta/main.yml b/icrp/roles/data-loader/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/icrp/roles/data-loader/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/icrp/roles/data-loader/tasks/main.yml b/icrp/roles/data-loader/tasks/main.yml new file mode 100644 index 000000000..81b09ca1d --- /dev/null +++ b/icrp/roles/data-loader/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- name: Check if workspace exists + stat: + path: "{{workspace}}" + register: bento_result + +- name: remove old bento + file: + path: "{{workspace}}" + state: absent + when: bento_result.stat.exists + +- name: clone dataloader + git: + repo: 'https://github.com/CBIIT/icdc-dataloader' + dest: "{{workspace}}/icdc-dataloader" + track_submodules: yes + update: yes + version: "Bento" + +- name: clone bento-custodian + git: + repo: 'https://github.com/CBIIT/bento-custodian' + dest: "{{workspace}}/bento-custodian" + +- name: update settings + template: + dest: "{{workspace}}/icdc-dataloader/config.yml" + src: config.yml.j2 + +- name: install python3 + yum: + name: python3 + state: installed + +- name: pip install requirements + pip: + requirements: "{{workspace}}/icdc-dataloader/requirements.txt" + executable: pip3 + +- name: set neo4j password + uri: + url: 
http://127.0.0.1:7474/user/neo4j/password + user: neo4j + password: "{{neo4j_password}}" + method: POST + body: > + {"password":"{{neo4j_password}}"} + body_format: json + headers: + Accept: "application/json" + Content-Type: "application/json" + +- name: loader data + shell: + cmd: > + python3 + loader.py + {{workspace}}/icdc-dataloader/config.yml + chdir: "{{workspace}}/icdc-dataloader" + register: data_loader + +- name: show dataloader output + debug: + msg: "{{data_loader}}" diff --git a/icrp/roles/data-loader/templates/config.yml.j2 b/icrp/roles/data-loader/templates/config.yml.j2 new file mode 100644 index 000000000..c94d38de5 --- /dev/null +++ b/icrp/roles/data-loader/templates/config.yml.j2 @@ -0,0 +1,42 @@ +Config: + temp_folder: tmp + backup_folder: /tmp/data-loader-backups + + neo4j: + # Location of Neo4j server, e.g., bolt://127.0.0.1:7687 + uri: bolt://127.0.0.1:7687 + # Neo4j username + user: neo4j + # Neo4j password + password: {{neo4j_password}} + + # Schema files' locations + schema: + - {{workspace}}/bento-custodian/data/model-desc/bento_model_file.yaml + - {{workspace}}/bento-custodian/data/model-desc/bento_model_properties.yaml + + #Property file location + prop_file: {{workspace}}/bento-custodian/data/config/props-bento-ext.yml + + # Skip validations, aka. Cheat Mode + cheat_mode: false + # Validations only, skip loading + dry_run: false + # Wipe out database before loading, you'll lose all data! 
+ wipe_db: false + # Skip backup step + no_backup: true + # Automatically confirm deletion and database wiping (without asking user to confirm) + no_confirmation: true + # Max violations to display, default is 10 + max_violations: 10 + no_parents: false + + # S3 bucket name, if you are loading from an S3 bucket + s3_bucket: + # S3 folder for dataset + s3_folder: + # Loading mode, can be UPSERT_MODE, NEW_MODE or DELETE_MODE, default is UPSERT_MODE + loading_mode: UPSERT_MODE + # Location of dataset + dataset: {{workspace}}/bento-custodian/data/Bento_Mock_Data_for_PACT1 diff --git a/icrp/roles/data-loader/tests/inventory b/icrp/roles/data-loader/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/data-loader/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/data-loader/tests/test.yml b/icrp/roles/data-loader/tests/test.yml new file mode 100644 index 000000000..34a78f5b6 --- /dev/null +++ b/icrp/roles/data-loader/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - data-processing \ No newline at end of file diff --git a/icrp/roles/data-loader/vars/main.yml b/icrp/roles/data-loader/vars/main.yml new file mode 100644 index 000000000..246945905 --- /dev/null +++ b/icrp/roles/data-loader/vars/main.yml @@ -0,0 +1,12 @@ +--- +# vars file for data loading +neo4j_password: "{{neo4j_password}}" +neo4j_user: neo4j +tier: "{{ lookup('env','TIER') }}" +workspace: "/tmp/workspace" +neo4j_ip: "{{ lookup('env','NEO4J_IP') }}" +s3_folder: "{{ lookup('env','S3_FOLDER') }}" +wipe_db: "{{ lookup('env','WIPE_DB') }}" + + + diff --git a/icrp/roles/deploy-comets/README.md b/icrp/roles/deploy-comets/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/deploy-comets/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. 
+ +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/deploy-comets/defaults/main.yml b/icrp/roles/deploy-comets/defaults/main.yml new file mode 100644 index 000000000..cc3ff9ffa --- /dev/null +++ b/icrp/roles/deploy-comets/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for deploy-comets \ No newline at end of file diff --git a/icrp/roles/deploy-comets/handlers/main.yml b/icrp/roles/deploy-comets/handlers/main.yml new file mode 100644 index 000000000..b30592c1a --- /dev/null +++ b/icrp/roles/deploy-comets/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for deploy-comets diff --git a/icrp/roles/deploy-comets/meta/main.yml b/icrp/roles/deploy-comets/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/icrp/roles/deploy-comets/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. 
A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/icrp/roles/deploy-comets/tasks/build.yml b/icrp/roles/deploy-comets/tasks/build.yml new file mode 100644 index 000000000..b44328d4a --- /dev/null +++ b/icrp/roles/deploy-comets/tasks/build.yml @@ -0,0 +1,103 @@ +--- + +- name: gather comets-app facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-frontend + "instance-state-name": running + register: frontend + +- name: set instance ip + set_fact: + queue_host: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +- name: Check if comets and exits + stat: + path: /tmp/comets + register: comets_result + +- name: remove old comets repo + file: + path: /tmp/comets + state: absent + when: comets_result.stat.exists + +- name: Check if R-cometsAnalytics exits + stat: + path: /tmp/R-cometsAnalytics + register: r_result + +- name: remove old r repo + file: + path: /tmp/R-cometsAnalytics + state: absent + when: r_result.stat.exists + +- name: checkout comets repo + git: + repo: "{{comets_repo}}" + dest: /tmp/comets + version: master + force: yes + +- name: checkout R repo + git: + repo: "{{R_repo}}" + dest: /tmp/R-cometsAnalytics + version: comets_1.6.0_20191205 + force: yes + +- name: copy R code + synchronize: + dest: /tmp/comets/comets/restricted/rcode/ + src: /tmp/R-cometsAnalytics/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + +- name: update settings + template: + dest: /tmp/comets/comets/restricted/settings.yml + src: settings.yml.j2 + +- name: add 
httpd.conf Dockerfile + template: src={{item.src}} dest={{item.dest}} + with_items: + - { src: 'Dockerfile.j2', dest: '/tmp/comets/Dockerfile' } + - { src: 'httpd.conf.j2', dest: '/tmp/comets/comets-httpd.conf' } + +- name: login into ecr + shell: "$(/bin/aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin {{ecr}})" + ignore_errors: True + register: ecr_login + + +# - name: build apache image +# docker_image: +# path: "/tmp/comets" +# pull: yes +# name: "{{ecr}}/comets" +# tag: "apache-{{version}}" +# push: yes + +- name: build {{ecr}}/comets:app image + docker_image: + path: "/tmp/comets" + dockerfile: "/tmp/comets/docker/comets.app.dockerfile" + pull: yes + name: "{{ecr}}/comets" + tag: "app-{{version}}" + push: yes + + +- name: build {{ecr}}/comets:processor image + docker_image: + path: "/tmp/comets" + dockerfile: "/tmp/comets/docker/comets.app.processor.dockerfile" + pull: yes + name: "{{ecr}}/comets" + tag: "processor-{{version}}" + push: yes diff --git a/icrp/roles/deploy-comets/tasks/deploy.yml b/icrp/roles/deploy-comets/tasks/deploy.yml new file mode 100644 index 000000000..a1902ac5c --- /dev/null +++ b/icrp/roles/deploy-comets/tasks/deploy.yml @@ -0,0 +1,196 @@ +--- +- name: create task comets definition + ecs_taskdefinition: + network_mode: bridge + family: comets-{{env}}-comets + memory: '1024' + cpu: '1024' + state: present + region: "{{region}}" + containers: + - name: comets + essential: true + image: "{{ecr}}/comets:app-{{version}}" + portMappings: + - containerPort: "8000" + hostPort: "8000" + protocol: tcp + links: + - activemq + dependsOn: + - containerName: activemq + condition: START + logConfiguration: + logDriver: awslogs + options: + awslogs-group: "comets-app-logs" + awslogs-region: "us-east-1" + awslogs-stream-prefix: "comets-{{env}}" + - name: activemq + image: cbiitss/activemq:latest + essential: true + portMappings: + - protocol: tcp + containerPort: 61613 + hostPort: 61613 + - protocol: tcp + 
containerPort: 8161 + hostPort: 8161 + + register: task_output + +- name: create processor task definition + ecs_taskdefinition: + network_mode: bridge + family: comets-{{env}}-processor + state: present + memory: '2048' + cpu: '1024' + region: "{{region}}" + containers: + - name: processor + essential: true + image: "{{ecr}}/comets:processor-{{version}}" + logConfiguration: + logDriver: awslogs + options: + awslogs-group: "comets-processor-logs" + awslogs-region: "us-east-1" + awslogs-stream-prefix: "comets-{{env}}" + register: task_output + +# - name: create apach task definition +# ecs_taskdefinition: +# network_mode: bridge +# family: comets-{{env}}-apache +# state: present +# memory: '512' +# cpu: '512' +# region: "{{region}}" +# containers: +# - name: apache +# essential: true +# image: "{{ecr}}/comets:apache-{{version}}" +# portMappings: +# - protocol: tcp +# containerPort: 80 +# hostPort: 80 +# logConfiguration: +# logDriver: awslogs +# options: +# awslogs-group: "comets-apache-logs" +# awslogs-region: "us-east-1" +# awslogs-stream-prefix: "comets-{{env}}" +# register: task_output + + + +# - name: query task definition apache +# ecs_taskdefinition_facts: +# task_definition: comets-{{env}}-apache +# region: "{{region}}" +# register: task_apache + +- name: query task definition comets + ecs_taskdefinition_facts: + task_definition: comets-{{env}}-comets + region: "{{region}}" + register: task_comets + +- name: query task definition processor + ecs_taskdefinition_facts: + task_definition: comets-{{env}}-processor + region: "{{region}}" + register: task_processor + +# - name: query ecs service apache +# ecs_service_facts: +# cluster: comets-{{env}} +# service: comets-{{env}}-apache +# details: true +# region: "{{region}}" +# register: service_apache + +- name: query ecs service comets + ecs_service_facts: + cluster: comets-{{env}} + service: comets-{{env}}-comets + details: true + region: "{{region}}" + register: service_comets + +- name: query ecs service 
processor + ecs_service_facts: + cluster: comets-{{env}} + service: comets-{{env}}-processor + details: true + region: "{{region}}" + register: service_processor + + +- name: set facts + set_fact: + comets_revision: "{{task_comets.revision}}" + # apache_revision: "{{task_apache.revision}}" + processor_revision: "{{task_processor.revision}}" + task_processor_name: "{{task_processor.family}}" + # task_apache_name: "{{task_apache.family}}" + task_comets_name: "{{task_comets.family}}" + lb_frontend: "{{service_comets.services[0].loadBalancers}}" + role_arn: "{{service_comets.services[0].roleArn}}" + + +# - debug: +# msg: "{{service_comets}}" +# - debug: +# msg: "{{service_processor}}" + +# - name: update apache service +# ecs_service: +# state: present +# name: comets-{{env}}-apache +# cluster: comets-{{env}} +# task_definition: "{{task_apache_name}}:{{apache_revision}}" +# role: "{{role_arn}}" +# # force_new_deployment: yes +# deployment_configuration: +# minimum_healthy_percent: 0 +# maximum_percent: 100 +# desired_count: 1 +# load_balancers: "{{ lb_frontend }}" +# region: "{{region}}" +# register: service_apache_output + + +- name: update comets service + ecs_service: + state: present + name: comets-{{env}}-comets + cluster: comets-{{env}} + task_definition: "{{task_comets_name}}:{{comets_revision}}" + role: "{{role_arn}}" + # force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_comets_output + + +- name: update processor service + ecs_service: + state: present + name: comets-{{env}}-processor + cluster: comets-{{env}} + task_definition: "{{task_processor_name}}:{{processor_revision}}" + # role: "{{role_arn}}" + # force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + region: "{{region}}" + register: service_processor_output + diff --git 
a/icrp/roles/deploy-comets/tasks/main.yml b/icrp/roles/deploy-comets/tasks/main.yml new file mode 100644 index 000000000..ff048c165 --- /dev/null +++ b/icrp/roles/deploy-comets/tasks/main.yml @@ -0,0 +1,89 @@ +--- +# tasks file for deploy-comets + +- name: gather comets-app facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Environment": "{{env}}" + "tag:Name": comets-frontend + "instance-state-name": running + register: frontend + +- name: set instance ip + set_fact: + queue_host: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +- name: Check if comets and exits + stat: + path: /tmp/comets + register: comets_result + +- name: remove old comets repo + file: + path: /tmp/comets + state: absent + when: comets_result.stat.exists + +- name: Check if R-cometsAnalytics exits + stat: + path: /tmp/R-cometsAnalytics + register: r_result + +- name: remove old r repo + file: + path: /tmp/R-cometsAnalytics + state: absent + when: r_result.stat.exists + +#create app and logs directory +- name: create app and logs directory + file: + path: "{{app_base_directory}}/{{item}}" + state: directory + loop: + - app + - logs + +- name: checkout comets repo + git: + repo: "{{comets_repo}}" + dest: /tmp/comets + version: master + force: yes + +- name: checkout R repo + git: + repo: "{{R_repo}}" + dest: /tmp/R-cometsAnalytics + version: comets_1.6.0_20191205 + force: yes + +- name: copy comets app + synchronize: + dest: /local/content/docker/comets/app/ + src: /tmp/comets/comets/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + - "--exclude=settings.yml" + +- name: copy R code + synchronize: + dest: /local/content/docker/comets/app/restricted/rcode/ + src: /tmp/R-cometsAnalytics/ + delete: yes + recursive: true + rsync_opts: + - "--exclude=.git" + +- name: update settings + template: + dest: /local/content/docker/comets/app/restricted/settings.yml + src: settings.yml.j2 + +- name: restart docker + service: + name: docker + state: 
restarted \ No newline at end of file diff --git a/icrp/roles/deploy-comets/templates/Dockerfile.j2 b/icrp/roles/deploy-comets/templates/Dockerfile.j2 new file mode 100644 index 000000000..ca37f173b --- /dev/null +++ b/icrp/roles/deploy-comets/templates/Dockerfile.j2 @@ -0,0 +1,9 @@ +FROM httpd:2.4.43 + +RUN apt-get update \ + && apt-get install -y apt-utils libapache2-mod-auth-openidc apache2-bin \ + && ln -s /usr/lib/apache2/modules/mod_auth_openidc.so /usr/local/apache2/modules/mod_auth_openidc.so + +COPY ./comets-httpd.conf /usr/local/apache2/conf/httpd.conf + + \ No newline at end of file diff --git a/icrp/roles/deploy-comets/templates/httpd.conf.j2 b/icrp/roles/deploy-comets/templates/httpd.conf.j2 new file mode 100644 index 000000000..26644a739 --- /dev/null +++ b/icrp/roles/deploy-comets/templates/httpd.conf.j2 @@ -0,0 +1,648 @@ +# +# This is the main Apache HTTP server configuration file. It contains the +# configuration directives that give the server its instructions. +# See for detailed information. +# In particular, see +# +# for a discussion of each configuration directive. +# +# Do NOT simply read the instructions in here without understanding +# what they do. They're here only as hints or reminders. If you are unsure +# consult the online docs. You have been warned. +# +# Configuration and logfile names: If the filenames you specify for many +# of the server's control files begin with "/" (or "drive:/" for Win32), the +# server will use that explicit path. If the filenames do *not* begin +# with "/", the value of ServerRoot is prepended -- so "logs/access_log" +# with ServerRoot set to "/usr/local/apache2" will be interpreted by the +# server as "/usr/local/apache2/logs/access_log", whereas "/logs/access_log" +# will be interpreted as '/logs/access_log'. + +# +# ServerRoot: The top of the directory tree under which the server's +# configuration, error, and log files are kept. +# +# Do not add a slash at the end of the directory path. 
If you point +# ServerRoot at a non-local disk, be sure to specify a local disk on the +# Mutex directive, if file-based mutexes are used. If you wish to share the +# same ServerRoot for multiple httpd daemons, you will need to change at +# least PidFile. +# +ServerRoot "/usr/local/apache2" + +# +# Mutex: Allows you to set the mutex mechanism and mutex file directory +# for individual mutexes, or change the global defaults +# +# Uncomment and change the directory if mutexes are file-based and the default +# mutex file directory is not on a local disk or is not appropriate for some +# other reason. +# +# Mutex default:logs + +# +# Listen: Allows you to bind Apache to specific IP addresses and/or +# ports, instead of the default. See also the +# directive. +# +# Change this to Listen on specific IP addresses as shown below to +# prevent Apache from glomming onto all bound IP addresses. +# +#Listen 12.34.56.78:80 +Listen 80 + +# +# Dynamic Shared Object (DSO) Support +# +# To be able to use the functionality of a module which was built as a DSO you +# have to place corresponding `LoadModule' lines at this location so the +# directives contained in it are actually available _before_ they are used. +# Statically compiled modules (those listed by `httpd -l') do not need +# to be loaded here. 
+# +# Example: +# LoadModule foo_module modules/mod_foo.so +# +LoadModule mpm_event_module modules/mod_mpm_event.so +LoadModule mpm_prefork_module modules/mod_mpm_prefork.so +LoadModule mpm_worker_module modules/mod_mpm_worker.so +LoadModule authn_file_module modules/mod_authn_file.so +#LoadModule authn_dbm_module modules/mod_authn_dbm.so +#LoadModule authn_anon_module modules/mod_authn_anon.so +#LoadModule authn_dbd_module modules/mod_authn_dbd.so +#LoadModule authn_socache_module modules/mod_authn_socache.so +LoadModule authn_core_module modules/mod_authn_core.so +LoadModule authz_host_module modules/mod_authz_host.so +LoadModule authz_groupfile_module modules/mod_authz_groupfile.so +LoadModule authz_user_module modules/mod_authz_user.so +#LoadModule authz_dbm_module modules/mod_authz_dbm.so +#LoadModule authz_owner_module modules/mod_authz_owner.so +#LoadModule authz_dbd_module modules/mod_authz_dbd.so +LoadModule authz_core_module modules/mod_authz_core.so +#LoadModule authnz_ldap_module modules/mod_authnz_ldap.so +#LoadModule authnz_fcgi_module modules/mod_authnz_fcgi.so +LoadModule access_compat_module modules/mod_access_compat.so +LoadModule auth_basic_module modules/mod_auth_basic.so +#LoadModule auth_form_module modules/mod_auth_form.so +#LoadModule auth_digest_module modules/mod_auth_digest.so +#LoadModule allowmethods_module modules/mod_allowmethods.so +#LoadModule isapi_module modules/mod_isapi.so +#LoadModule file_cache_module modules/mod_file_cache.so +#LoadModule cache_module modules/mod_cache.so +#LoadModule cache_disk_module modules/mod_cache_disk.so +#LoadModule cache_socache_module modules/mod_cache_socache.so +#LoadModule socache_shmcb_module modules/mod_socache_shmcb.so +#LoadModule socache_dbm_module modules/mod_socache_dbm.so +#LoadModule socache_memcache_module modules/mod_socache_memcache.so +#LoadModule socache_redis_module modules/mod_socache_redis.so +#LoadModule watchdog_module modules/mod_watchdog.so +#LoadModule macro_module 
modules/mod_macro.so +#LoadModule dbd_module modules/mod_dbd.so +#LoadModule bucketeer_module modules/mod_bucketeer.so +#LoadModule dumpio_module modules/mod_dumpio.so +#LoadModule echo_module modules/mod_echo.so +#LoadModule example_hooks_module modules/mod_example_hooks.so +#LoadModule case_filter_module modules/mod_case_filter.so +#LoadModule case_filter_in_module modules/mod_case_filter_in.so +#LoadModule example_ipc_module modules/mod_example_ipc.so +#LoadModule buffer_module modules/mod_buffer.so +#LoadModule data_module modules/mod_data.so +#LoadModule ratelimit_module modules/mod_ratelimit.so +LoadModule reqtimeout_module modules/mod_reqtimeout.so +#LoadModule ext_filter_module modules/mod_ext_filter.so +#LoadModule request_module modules/mod_request.so +#LoadModule include_module modules/mod_include.so +LoadModule filter_module modules/mod_filter.so +#LoadModule reflector_module modules/mod_reflector.so +#LoadModule substitute_module modules/mod_substitute.so +#LoadModule sed_module modules/mod_sed.so +#LoadModule charset_lite_module modules/mod_charset_lite.so +#LoadModule deflate_module modules/mod_deflate.so +#LoadModule xml2enc_module modules/mod_xml2enc.so +#LoadModule proxy_html_module modules/mod_proxy_html.so +#LoadModule brotli_module modules/mod_brotli.so +LoadModule mime_module modules/mod_mime.so +#LoadModule ldap_module modules/mod_ldap.so +LoadModule log_config_module modules/mod_log_config.so +#LoadModule log_debug_module modules/mod_log_debug.so +#LoadModule log_forensic_module modules/mod_log_forensic.so +#LoadModule logio_module modules/mod_logio.so +#LoadModule lua_module modules/mod_lua.so +LoadModule env_module modules/mod_env.so +#LoadModule mime_magic_module modules/mod_mime_magic.so +#LoadModule cern_meta_module modules/mod_cern_meta.so +#LoadModule expires_module modules/mod_expires.so +LoadModule headers_module modules/mod_headers.so +#LoadModule ident_module modules/mod_ident.so +#LoadModule usertrack_module 
modules/mod_usertrack.so +#LoadModule unique_id_module modules/mod_unique_id.so +LoadModule setenvif_module modules/mod_setenvif.so +LoadModule version_module modules/mod_version.so +LoadModule remoteip_module modules/mod_remoteip.so +LoadModule proxy_module modules/mod_proxy.so +LoadModule proxy_connect_module modules/mod_proxy_connect.so +LoadModule proxy_ftp_module modules/mod_proxy_ftp.so +LoadModule proxy_http_module modules/mod_proxy_http.so +LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so +#LoadModule proxy_scgi_module modules/mod_proxy_scgi.so +LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so +#LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so +#LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so +LoadModule proxy_ajp_module modules/mod_proxy_ajp.so +LoadModule proxy_balancer_module modules/mod_proxy_balancer.so +LoadModule proxy_express_module modules/mod_proxy_express.so +LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so +#LoadModule session_module modules/mod_session.so +#LoadModule session_cookie_module modules/mod_session_cookie.so +#LoadModule session_crypto_module modules/mod_session_crypto.so +#LoadModule session_dbd_module modules/mod_session_dbd.so +#LoadModule slotmem_shm_module modules/mod_slotmem_shm.so +#LoadModule slotmem_plain_module modules/mod_slotmem_plain.so +#LoadModule ssl_module modules/mod_ssl.so +#LoadModule optional_hook_export_module modules/mod_optional_hook_export.so +#LoadModule optional_hook_import_module modules/mod_optional_hook_import.so +#LoadModule optional_fn_import_module modules/mod_optional_fn_import.so +#LoadModule optional_fn_export_module modules/mod_optional_fn_export.so +#LoadModule dialup_module modules/mod_dialup.so +LoadModule http2_module modules/mod_http2.so +#LoadModule proxy_http2_module modules/mod_proxy_http2.so +#LoadModule md_module modules/mod_md.so +#LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so +#LoadModule 
lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so +#LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so +#LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so +LoadModule unixd_module modules/mod_unixd.so +#LoadModule heartbeat_module modules/mod_heartbeat.so +#LoadModule heartmonitor_module modules/mod_heartmonitor.so +#LoadModule dav_module modules/mod_dav.so +LoadModule status_module modules/mod_status.so +LoadModule autoindex_module modules/mod_autoindex.so +#LoadModule asis_module modules/mod_asis.so +#LoadModule info_module modules/mod_info.so +#LoadModule suexec_module modules/mod_suexec.so + + #LoadModule cgid_module modules/mod_cgid.so + + + #LoadModule cgi_module modules/mod_cgi.so + +#LoadModule dav_fs_module modules/mod_dav_fs.so +#LoadModule dav_lock_module modules/mod_dav_lock.so +#LoadModule vhost_alias_module modules/mod_vhost_alias.so +#LoadModule negotiation_module modules/mod_negotiation.so +LoadModule dir_module modules/mod_dir.so +#LoadModule imagemap_module modules/mod_imagemap.so +#LoadModule actions_module modules/mod_actions.so +#LoadModule speling_module modules/mod_speling.so +LoadModule userdir_module modules/mod_userdir.so +LoadModule alias_module modules/mod_alias.so +#LoadModule rewrite_module modules/mod_rewrite.so + + +# +# If you wish httpd to run as a different user or group, you must run +# httpd as root initially and it will switch. +# +# User/Group: The name (or #number) of the user/group to run httpd as. +# It is usually good practice to create a dedicated user and group for +# running httpd, as with most system services. +# +User daemon +Group daemon + + + +# 'Main' server configuration +# +# The directives in this section set up the values used by the 'main' +# server, which responds to any requests that aren't handled by a +# definition. These values also provide defaults for +# any containers you may define later in the file. 
+# +# All of these directives may appear inside containers, +# in which case these default settings will be overridden for the +# virtual host being defined. +# + +# +# ServerAdmin: Your address, where problems with the server should be +# e-mailed. This address appears on some server-generated pages, such +# as error documents. e.g. admin@your-domain.com +# +ServerAdmin you@example.com + +# +# ServerName gives the name and port that the server uses to identify itself. +# This can often be determined automatically, but we recommend you specify +# it explicitly to prevent problems during startup. +# +# If your host doesn't have a registered DNS name, enter its IP address here. +# +#ServerName www.example.com:80 + +# +# Deny access to the entirety of your server's filesystem. You must +# explicitly permit access to web content directories in other +# blocks below. +# + + AllowOverride none + Require all denied + + +# +# Note that from this point forward you must specifically allow +# particular features to be enabled - so if something's not working as +# you might expect, make sure that you have specifically enabled it +# below. +# + +# +# DocumentRoot: The directory out of which you will serve your +# documents. By default, all requests are taken from this directory, but +# symbolic links and aliases may be used to point to other locations. +# +DocumentRoot "/usr/local/apache2/htdocs" + + # + # Possible values for the Options directive are "None", "All", + # or any combination of: + # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews + # + # Note that "MultiViews" must be named *explicitly* --- "Options All" + # doesn't give it to you. + # + # The Options directive is both complicated and important. Please see + # http://httpd.apache.org/docs/2.4/mod/core.html#options + # for more information. + # + Options Indexes FollowSymLinks + + # + # AllowOverride controls what directives may be placed in .htaccess files. 
+ # It can be "All", "None", or any combination of the keywords: + # AllowOverride FileInfo AuthConfig Limit + # + AllowOverride None + + # + # Controls who can get stuff from this server. + # + Require all granted + + +# +# DirectoryIndex: sets the file that Apache will serve if a directory +# is requested. +# + + DirectoryIndex index.html + + +# +# The following lines prevent .htaccess and .htpasswd files from being +# viewed by Web clients. +# + + Require all denied + + +# +# ErrorLog: The location of the error log file. +# If you do not specify an ErrorLog directive within a +# container, error messages relating to that virtual host will be +# logged here. If you *do* define an error logfile for a +# container, that host's errors will be logged there and not here. +# +ErrorLog /proc/self/fd/2 + +# +# LogLevel: Control the number of messages logged to the error_log. +# Possible values include: debug, info, notice, warn, error, crit, +# alert, emerg. +# +LogLevel warn + + + # + # The following directives define some format nicknames for use with + # a CustomLog directive (see below). + # + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + + # You need to enable mod_logio.c to use %I and %O + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + + + # + # The location and format of the access logfile (Common Logfile Format). + # If you do not define any access logfiles within a + # container, they will be logged here. Contrariwise, if you *do* + # define per- access logfiles, transactions will be + # logged therein and *not* in this file. + # + CustomLog /proc/self/fd/1 common + + # + # If you prefer a logfile with access, agent, and referer information + # (Combined Logfile Format) you can use the following directive. 
+ # + #CustomLog "logs/access_log" combined + + + + # + # Redirect: Allows you to tell clients about documents that used to + # exist in your server's namespace, but do not anymore. The client + # will make a new request for the document at its new location. + # Example: + # Redirect permanent /foo http://www.example.com/bar + + # + # Alias: Maps web paths into filesystem paths and is used to + # access content that does not live under the DocumentRoot. + # Example: + # Alias /webpath /full/filesystem/path + # + # If you include a trailing / on /webpath then the server will + # require it to be present in the URL. You will also likely + # need to provide a section to allow access to + # the filesystem path. + + # + # ScriptAlias: This controls which directories contain server scripts. + # ScriptAliases are essentially the same as Aliases, except that + # documents in the target directory are treated as applications and + # run by the server when requested rather than as documents sent to the + # client. The same rules about trailing "/" apply to ScriptAlias + # directives as to Alias. + # + ScriptAlias /cgi-bin/ "/usr/local/apache2/cgi-bin/" + + + + + # + # ScriptSock: On threaded servers, designate the path to the UNIX + # socket used to communicate with the CGI daemon of mod_cgid. + # + #Scriptsock cgisock + + +# +# "/usr/local/apache2/cgi-bin" should be changed to whatever your ScriptAliased +# CGI directory exists, if you have that configured. +# + + AllowOverride None + Options None + Require all granted + + + + # + # Avoid passing HTTP_PROXY environment to CGI's on this or any proxied + # backend servers which have lingering "httpoxy" defects. + # 'Proxy' request header is undefined by the IETF, not listed by IANA + # + RequestHeader unset Proxy early + + + + # + # TypesConfig points to the file containing the list of mappings from + # filename extension to MIME-type. 
+ # + TypesConfig conf/mime.types + + # + # AddType allows you to add to or override the MIME configuration + # file specified in TypesConfig for specific file types. + # + #AddType application/x-gzip .tgz + # + # AddEncoding allows you to have certain browsers uncompress + # information on the fly. Note: Not all browsers support this. + # + #AddEncoding x-compress .Z + #AddEncoding x-gzip .gz .tgz + # + # If the AddEncoding directives above are commented-out, then you + # probably should define those extensions to indicate media types: + # + AddType application/x-compress .Z + AddType application/x-gzip .gz .tgz + + # + # AddHandler allows you to map certain file extensions to "handlers": + # actions unrelated to filetype. These can be either built into the server + # or added with the Action directive (see below) + # + # To use CGI scripts outside of ScriptAliased directories: + # (You will also need to add "ExecCGI" to the "Options" directive.) + # + #AddHandler cgi-script .cgi + + # For type maps (negotiated resources): + #AddHandler type-map var + + # + # Filters allow you to process content before it is sent to the client. + # + # To parse .shtml files for server-side includes (SSI): + # (You will also need to add "Includes" to the "Options" directive.) + # + #AddType text/html .shtml + #AddOutputFilter INCLUDES .shtml + + +# +# The mod_mime_magic module allows the server to use various hints from the +# contents of the file itself to determine its type. The MIMEMagicFile +# directive tells the module where the hint definitions are located. +# +#MIMEMagicFile conf/magic + +# +# Customizable error responses come in three flavors: +# 1) plain text 2) local redirects 3) external redirects +# +# Some examples: +#ErrorDocument 500 "The server made a boo boo." 
+#ErrorDocument 404 /missing.html +#ErrorDocument 404 "/cgi-bin/missing_handler.pl" +#ErrorDocument 402 http://www.example.com/subscription_info.html +# + +# +# MaxRanges: Maximum number of Ranges in a request before +# returning the entire resource, or one of the special +# values 'default', 'none' or 'unlimited'. +# Default setting is to accept 200 Ranges. +#MaxRanges unlimited + +# +# EnableMMAP and EnableSendfile: On systems that support it, +# memory-mapping or the sendfile syscall may be used to deliver +# files. This usually improves server performance, but must +# be turned off when serving from networked-mounted +# filesystems or if support for these functions is otherwise +# broken on your system. +# Defaults: EnableMMAP On, EnableSendfile Off +# +#EnableMMAP off +#EnableSendfile on + +# Supplemental configuration +# +# The configuration files in the conf/extra/ directory can be +# included to add extra features or to modify the default configuration of +# the server, or you may simply copy their contents here and change as +# necessary. 
+ +# Server-pool management (MPM specific) +#Include conf/extra/httpd-mpm.conf + +# Multi-language error messages +#Include conf/extra/httpd-multilang-errordoc.conf + +# Fancy directory listings +#Include conf/extra/httpd-autoindex.conf + +# Language settings +#Include conf/extra/httpd-languages.conf + +# User home directories +#Include conf/extra/httpd-userdir.conf + +# Real-time info on requests and configuration +#Include conf/extra/httpd-info.conf + +# Virtual hosts +#Include conf/extra/httpd-vhosts.conf + +# Local access to the Apache HTTP Server Manual +#Include conf/extra/httpd-manual.conf + +# Distributed authoring and versioning (WebDAV) +#Include conf/extra/httpd-dav.conf + +# Various default settings +#Include conf/extra/httpd-default.conf + +# Configure mod_proxy_html to understand HTML4/XHTML1 + +Include conf/extra/proxy-html.conf + + +# Secure (SSL/TLS) connections +#Include conf/extra/httpd-ssl.conf +# +# Note: The following must must be present to support +# starting without SSL on platforms with no /dev/random equivalent +# but a statically compiled-in mod_ssl. +# + +SSLRandomSeed startup builtin +SSLRandomSeed connect builtin + + +#comets conf + +LoadModule auth_openidc_module modules/mod_auth_openidc.so +LoadModule rewrite_module modules/mod_rewrite.so + +ServerName localhost:80 + + CustomLog logs/access.log combined env=!dontlog + ErrorLog logs/error.log + + + SetHandler server-info + ProxyPass ! 
+ + +RewriteEngine On +ProxyRequests Off + +ProxyPass / http://{{queue_host}}:8000/ timeout=1800 +ProxyPassReverse / http://{{queue_host}}:8000/ + +OIDCProviderIssuer {{open_id_issuer}} +OIDCProviderAuthorizationEndpoint {{open_id_issuer}}/authorize +OIDCProviderTokenEndpoint {{open_id_issuer}}/oauth/token +OIDCProviderTokenEndpointAuth client_secret_post +OIDCProviderUserInfoEndpoint {{open_id_issuer}}/userinfo +OIDCClientID {{open_client_id}} +OIDCClientSecret {{open_client_secret}} +OIDCDefaultURL https://comets-analytics-{{env}}.org/public/timeout.html +OIDCProviderJwksUri {{open_id_issuer}}/.well-known/jwks.json +OIDCSessionMaxDuration 0 +OIDCScope "openid email family_name given_name app_metadata user_metadata user_id comets" +OIDCRedirectURI https://comets-analytics-{{env}}.org/auth0_redirect +OIDCCryptoPassphrase "{{open_passphrase}}" +OIDCCookiePath / + +OIDCProviderMetadataURL {{open_id_issuer}}/.well-known/openid-configuration +OIDCPassIDTokenAs payload +OIDCUserInfoRefreshInterval 5 +OIDCSessionInactivityTimeout 1800 + + + AuthType openid-connect + + Require claim comets:active + Require claim comets:admin + + Header echo ^OIDC_id_token_payload$ + Header set Cache-Control "no-cache, no-store, must-revalidate" + Header set Pragma "no-cache" + Header set Expires 0 + ErrorDocument 401 /errordocs/unauthorized.html + LogLevel debug + + + + + Require claim comets:admin + + ErrorDocument 401 /errordocs/index.html + + + + + Require claim "comets~^\w+$" + + Require claim comets:active + Require claim comets:admin + + + ErrorDocument 401 /errordocs/registration.html + + + + + Require valid-user + + Require claim "comets~^\w+$" + + + ErrorDocument 401 /errordocs/index.html + + + + Require all denied + + + + AuthType none + Require all granted + + + + ErrorDocument 401 ! 
+ + + diff --git a/icrp/roles/deploy-comets/templates/settings.yml.j2 b/icrp/roles/deploy-comets/templates/settings.yml.j2 new file mode 100644 index 000000000..6afae9e44 --- /dev/null +++ b/icrp/roles/deploy-comets/templates/settings.yml.j2 @@ -0,0 +1,20 @@ +auth0: + token: "{{token}}" + domain: "ncicbiit" +email: + admin: [ "{{admin_email}}" ] + sender: "{{sender}}" + host: "{{host}}" + port: 465 + auth: true + username: "{{username}}" + password: "{{password}}" +queue: + host: "{{queue_host}}" + port: 61613 +s3: + bucket: "cbiit-tools-data" + username: "{{s3_username}}" + password: "{{s3_password}}" + input_folder: "comets-test/input/" + output_folder: "comets-test/output/" \ No newline at end of file diff --git a/icrp/roles/deploy-comets/tests/inventory b/icrp/roles/deploy-comets/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/deploy-comets/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/deploy-comets/tests/test.yml b/icrp/roles/deploy-comets/tests/test.yml new file mode 100644 index 000000000..d2ca8956f --- /dev/null +++ b/icrp/roles/deploy-comets/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - deploy-comets \ No newline at end of file diff --git a/icrp/roles/deploy-comets/vars/main.yml b/icrp/roles/deploy-comets/vars/main.yml new file mode 100644 index 000000000..6dd13579e --- /dev/null +++ b/icrp/roles/deploy-comets/vars/main.yml @@ -0,0 +1,23 @@ +--- +# vars file for deploy-comets +sender: "{{ lookup('aws_ssm', 'sender', region='us-east-1' ) }}" +host: "{{ lookup('aws_ssm', 'host', region='us-east-1' ) }}" +s3_password: "{{ lookup('aws_ssm', 's3_password', region='us-east-1' ) }}" +s3_username: "{{ lookup('aws_ssm', 's3_username', region='us-east-1' ) }}" +admin_email: "{{ lookup('aws_ssm', 'admin_email', region='us-east-1' ) }}" +username: "{{ lookup('aws_ssm', 'username', region='us-east-1' ) }}" +password: "{{ lookup('aws_ssm', 'password', 
region='us-east-1' ) }}" +token: "{{ lookup('aws_ssm', 'token', region='us-east-1' ) }}" +app_base_directory: /local/content/docker/comets +comets_repo: https://github.com/CBIIT/nci-webtools-comets-analytics +R_repo: https://github.com/CBIIT/R-cometsAnalytics +ecr: "{{ lookup('aws_ssm', 'ecr', region='us-east-1' ) }}" +version: 1.8.0 +env: "{{env}}" +region: us-east-1 + +#openid +open_id_issuer: "{{ lookup('aws_ssm', 'open_id_issuer', region='us-east-1' ) }}" +open_client_id: "{{ lookup('aws_ssm', 'open_client_id', region='us-east-1' ) }}" +open_client_secret: "{{ lookup('aws_ssm', 'open_client_secret', region='us-east-1' ) }}" +open_passphrase: "{{ lookup('aws_ssm', 'open_passphrase', region='us-east-1' ) }}" diff --git a/icrp/roles/deploy-custodian/.travis.yml b/icrp/roles/deploy-custodian/.travis.yml new file mode 100644 index 000000000..36bbf6208 --- /dev/null +++ b/icrp/roles/deploy-custodian/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/README.md b/icrp/roles/deploy-custodian/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/deploy-custodian/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/deploy-custodian/defaults/main.yml b/icrp/roles/deploy-custodian/defaults/main.yml new file mode 100644 index 000000000..9085d3dfc --- /dev/null +++ b/icrp/roles/deploy-custodian/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for cicd +platform: aws \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/handlers/main.yml b/icrp/roles/deploy-custodian/handlers/main.yml new file mode 100644 index 000000000..3c94d4172 --- /dev/null +++ b/icrp/roles/deploy-custodian/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for cicd \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/meta/main.yml b/icrp/roles/deploy-custodian/meta/main.yml new file mode 100644 index 000000000..227ad9c34 --- /dev/null +++ b/icrp/roles/deploy-custodian/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/tasks/build.yml b/icrp/roles/deploy-custodian/tasks/build.yml new file mode 100644 index 000000000..2b28383b7 --- /dev/null +++ b/icrp/roles/deploy-custodian/tasks/build.yml @@ -0,0 +1,123 @@ +--- +- name: install wget and curl + yum: + name: + - wget + - curl + - java-11-openjdk-devel + +- name: install maven and nodejs to build + shell: > + wget https://www-us.apache.org/dist/maven/maven-3/{{maven_version}}/binaries/apache-maven-{{maven_version}}-bin.tar.gz -P /tmp + && tar xf /tmp/apache-maven-{{maven_version}}-bin.tar.gz -C /usr/local + && ln -s /usr/local/apache-maven-{{maven_version}} /usr/local/maven + && curl -sL https://rpm.nodesource.com/setup_13.x | bash - + && yum install -y nodejs + +- name: clone backend github + git: + repo: 'https://github.com/CBIIT/bento-backend' + dest: "{{workspace}}/backend" + version: master + +- name: clone frontend github + git: + repo: 'https://github.com/CBIIT/bento-frontend' + dest: "{{workspace}}/frontend" + version: master + +- name: remove the application_example.properties file + file: + path: "{{workspace}}/backend/src/main/resources/application_example.properties" + state: absent + +- name: 
copy application.properties file to /src/main/resources/ + template: + src: application.properties.j2 + dest: "{{workspace}}/backend/src/main/resources/application.properties" + +- name: build backend code + command: mvn package -DskipTests + args: + chdir: "{{workspace}}" + +- name: copy Bento-0.0.1.war to api.war + copy: + remote_src: yes + src: "{{workspace}}/backend/target/Bento-0.0.1.war" + dest: "{{workspace}}/backend/target/ROOT.war" + +- name: build custodian/bento-backend image + docker_image: + build: + path: "{{workspace}}/backend" + dockerfile: "{{workspace}}/backend/dockerfiles/backend-dockerfile" + name: custodian/bento-backend + source: build + +- name: copy environment file to {{workspace}}/bento-frontend + template: + src: env.j2 + dest: "{{workspace}}/frontend/bento-frontend/.env" + +- name: run npm install in {{workspace}}/bento-frontend/ + command: "{{item}}" + args: + chdir: "{{workspace}}/frontend/bento-frontend" + loop: + - npm install + - npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + +- name: run npm install and build in {{workspace}}/bento-frontend/node_modules/mui-custom-datatables + command: "{{item}}" + args: + chdir: "{{workspace}}/frontend/bento-frontend/node_modules/mui-custom-datatables" + loop: + - npm install + - npm run build + +- name: run npm build in frontend + command: npm run-script build + args: + chdir: "{{workspace}}/frontend/bento-frontend" + +- name: build custodian/bento-frontend image + docker_image: + build: + path: "{{workspace}}/frontend/bento-frontend" + dockerfile: "{{workspace}}/frontend/dockerfiles/frontend-dockerfile" + pull: yes + nocache: yes + name: custodian/bento-frontend + source: build + +- name: gather neo4j facts + ec2_instance_facts: + region: "{{region}}" + filters: + "tag:Name": custodian-database-demo + "instance-state-name": running + register: database + +- name: set neo4j bearer + shell: echo "Basic $(echo -n "neo4j:{{neo4j_password}}" | base64)" + 
register: output_bearer + +- name: set instance ip + set_fact: + neo4j_ip: "{{ database.instances[0].network_interfaces[0].private_ip_address }}" + bearer: "{{output_bearer.stdout_lines}}" + +- name: post schemas + uri: + url: http://{{neo4j_ip}}:7474/graphql/idl/ + method: POST + body: "{{ lookup('file','{{workspace}}/src/main/resources/graphql/bento-extended.graphql') }}" + headers: + Accept: "application/json" + Authorization: "{{bearer}}" + register: schema + +- name: schema output + debug: + msg: "{{schema}}" \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/tasks/deploy.yml b/icrp/roles/deploy-custodian/tasks/deploy.yml new file mode 100644 index 000000000..9e5dea34e --- /dev/null +++ b/icrp/roles/deploy-custodian/tasks/deploy.yml @@ -0,0 +1,104 @@ +--- +- name: create task definition + ecs_taskdefinition: + containers: + - name: backend + essential: true + image: "cbiitssrepo/bento-backend" + portMappings: + - containerPort: "8080" + hostPort: "8080" + network_mode: bridge + family: bento-backend + memory: '512' + cpu: '512' + state: present + region: "{{region}}" + register: task_output + +- name: create task definition + ecs_taskdefinition: + containers: + - name: frontend + essential: true + image: "custodian/bento-frontend" + portMappings: + - containerPort: "80" + hostPort: "80" + network_mode: bridge + family: bento-frontend + state: present + memory: '512' + cpu: '512' + region: "{{region}}" + register: task_output + +- name: query task definition + ecs_taskdefinition_info: + task_definition: bento-frontend + region: "{{region}}" + register: task_frontend + +- name: query task definition + ecs_taskdefinition_info: + task_definition: bento-backend + region: "{{region}}" + register: task_backend + +- name: query backend service + ecs_service_info: + cluster: custodian + service: bento-backend + details: true + region: "{{region}}" + register: service_backend + +- name: query ecs service + ecs_service_info: + cluster: custodian + 
service: bento-frontend + details: true + region: "{{region}}" + register: service_frontend + +- name: set facts + set_fact: + frontend_revision: "{{task_frontend.revision}}" + backend_revision: "{{task_backend.revision}}" + task_backend_name: "{{task_backend.family}}" + task_frontend_name: "{{task_frontend.family}}" + lb_frontend: "{{service_frontend.services[0].loadBalancers}}" + lb_backend: "{{service_backend.services[0].loadBalancers}}" + role_arn: "{{service_backend.services[0].roleArn}}" + +- name: update frontend service + ecs_service: + state: present + name: bento-frontend + cluster: custodian + task_definition: "{{task_frontend_name}}:{{frontend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_frontend }}" + region: "{{region}}" + register: service_frontend_output + +- name: update backend service + ecs_service: + state: present + name: bento-backend + cluster: custodian + task_definition: "{{task_backend_name}}:{{backend_revision}}" + role: "{{role_arn}}" + force_new_deployment: yes + deployment_configuration: + minimum_healthy_percent: 0 + maximum_percent: 100 + desired_count: 1 + load_balancers: "{{ lb_backend }}" + region: "{{region}}" + register: service_backend_output diff --git a/icrp/roles/deploy-custodian/tasks/main.yml b/icrp/roles/deploy-custodian/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/icrp/roles/deploy-custodian/templates/application.properties.j2 b/icrp/roles/deploy-custodian/templates/application.properties.j2 new file mode 100644 index 000000000..cda065c47 --- /dev/null +++ b/icrp/roles/deploy-custodian/templates/application.properties.j2 @@ -0,0 +1,8 @@ +spring.mvc.throw-exception-if-no-handler-found=true +neo4j.graphql.endpoint=http://{{neo4j_ip}}:7474/graphql/ +neo4j.authorization={{bearer}} +spring.mvc.view.prefix=/WEB-INF/ +spring.mvc.view.suffix=.jsp 
+error.redirect_url=http://localhost/error.html +allow_grapqh_query = true +allow_graphql_mutation =false \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/templates/env.j2 b/icrp/roles/deploy-custodian/templates/env.j2 new file mode 100644 index 000000000..9bfc37b86 --- /dev/null +++ b/icrp/roles/deploy-custodian/templates/env.j2 @@ -0,0 +1,3 @@ +REACT_APP_BACKEND_API=https://api-{{tier}}.bento-tools.org/v1/graphql/ +REACT_APP_APPLICATION_VERSION={{version}} +REACT_APP_ABOUT_CONTENT_URL=https://raw.githubusercontent.com/CBIIT/bento-frontend/master/src/content/{{tier}}/aboutPagesContent.yaml diff --git a/icrp/roles/deploy-custodian/tests/inventory b/icrp/roles/deploy-custodian/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/deploy-custodian/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/deploy-custodian/tests/test.yml b/icrp/roles/deploy-custodian/tests/test.yml new file mode 100644 index 000000000..c9b79015b --- /dev/null +++ b/icrp/roles/deploy-custodian/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cicd \ No newline at end of file diff --git a/icrp/roles/deploy-custodian/vars/main.yml b/icrp/roles/deploy-custodian/vars/main.yml new file mode 100644 index 000000000..2c6b15d73 --- /dev/null +++ b/icrp/roles/deploy-custodian/vars/main.yml @@ -0,0 +1,6 @@ +--- +# vars file for cicd +workspace: "/tmp/workspace" +region: "{{region}}" +maven_version: 3.6.3 + diff --git a/icrp/roles/docker/README.md b/icrp/roles/docker/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/docker/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/docker/defaults/main.yml b/icrp/roles/docker/defaults/main.yml new file mode 100644 index 000000000..c45677333 --- /dev/null +++ b/icrp/roles/docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for docker \ No newline at end of file diff --git a/icrp/roles/docker/handlers/main.yml b/icrp/roles/docker/handlers/main.yml new file mode 100644 index 000000000..11684bcd3 --- /dev/null +++ b/icrp/roles/docker/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for docker +- name: restart docker + service: + name: docker + state: restarted diff --git a/icrp/roles/docker/meta/main.yml b/icrp/roles/docker/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/docker/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/icrp/roles/docker/tasks/main.yml b/icrp/roles/docker/tasks/main.yml new file mode 100644 index 000000000..fdd19002d --- /dev/null +++ b/icrp/roles/docker/tasks/main.yml @@ -0,0 +1,102 @@ +--- +# - name: Remove other Docker versions +# yum: +# name: +# - docker +# - docker-client +# - docker-client-latest +# - docker-common +# - docker-latest +# - docker-latest-logrotate +# - docker-logrotate +# - docker-engine +# - docker-compose +# state: absent + +# tasks file for docker +- name: install epel-release + yum: + name: + - epel-release +- name: install systems packages needed for docker + yum: + name: + - yum-utils + - device-mapper-persistent-data + - lvm2 + - python-setuptools + - firewalld + - python-pip + - docker-compose + state: installed + +- name: install docker python module + pip: + name: docker + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + tags: + - master + +- name: open tcp port 2375 and 2376 + firewalld: + state: enabled + permanent: yes + port: "{{item}}/tcp" + immediate: yes + zone: public + loop: + - 2375 + - 2376 + tags: + - master + +# - name: enable extra repos when running on red hat +# command: 
subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" +# when: ansible_distribution == 'Red Hat Enterprise Linux' + +- name: add docker repo + command: > + yum-config-manager --add-repo + https://download.docker.com/linux/centos/docker-ce.repo + +- name: install docker + yum: + name: ['docker-ce', 'docker-ce-cli', 'containerd.io'] + state: installed + +- name: enable and start docker + service: + name: docker + enabled: yes + state: restarted + +- name: create docker systemd options directory + file: + path: /etc/systemd/system/docker.service.d + state: directory + tags: + - master + +- name: configure docker startup options + template: + src: startup-options.conf.j2 + dest: /etc/systemd/system/docker.service.d/startup_options.conf + notify: + - restart docker + tags: + - master + +- name: reload systemctl daemon + systemd: + daemon_reload: yes + + + + + + diff --git a/icrp/roles/docker/templates/startup-options.conf.j2 b/icrp/roles/docker/templates/startup-options.conf.j2 new file mode 100644 index 000000000..afa83a0aa --- /dev/null +++ b/icrp/roles/docker/templates/startup-options.conf.j2 @@ -0,0 +1,3 @@ +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375 \ No newline at end of file diff --git a/icrp/roles/docker/tests/inventory b/icrp/roles/docker/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/docker/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/docker/tests/test.yml b/icrp/roles/docker/tests/test.yml new file mode 100644 index 000000000..2c81ca427 --- /dev/null +++ b/icrp/roles/docker/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - docker \ No newline at end of file diff --git a/icrp/roles/docker/vars/main.yml b/icrp/roles/docker/vars/main.yml new file mode 100644 index 000000000..dc934ce9c --- /dev/null +++ b/icrp/roles/docker/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for docker \ 
No newline at end of file diff --git a/icrp/roles/ecs-agent/README.md b/icrp/roles/ecs-agent/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/ecs-agent/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/ecs-agent/defaults/main.yml b/icrp/roles/ecs-agent/defaults/main.yml new file mode 100644 index 000000000..3bc56b749 --- /dev/null +++ b/icrp/roles/ecs-agent/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ecs-agent \ No newline at end of file diff --git a/icrp/roles/ecs-agent/files/docker-container@ecs-agent.service b/icrp/roles/ecs-agent/files/docker-container@ecs-agent.service new file mode 100644 index 000000000..733f682bf --- /dev/null +++ b/icrp/roles/ecs-agent/files/docker-container@ecs-agent.service @@ -0,0 +1,22 @@ +[Unit] +Description=Docker Container %I +Requires=docker.service +After=cloud-final.service + +[Service] +Restart=always +ExecStartPre=-/usr/bin/docker rm -f %i +ExecStart=/usr/bin/docker run --name %i \ +--privileged \ +--restart=on-failure:10 \ +--volume=/var/run:/var/run \ +--volume=/var/log/ecs/:/log:Z \ +--volume=/var/lib/ecs/data:/data:Z \ +--volume=/etc/ecs:/etc/ecs \ +--net=host \ +--env-file=/etc/ecs/ecs.config \ +amazon/amazon-ecs-agent:latest +ExecStop=/usr/bin/docker stop %i + +[Install] +WantedBy=default.target \ No newline at end of file diff --git a/icrp/roles/ecs-agent/handlers/main.yml b/icrp/roles/ecs-agent/handlers/main.yml new file mode 100644 index 000000000..502c66361 --- /dev/null +++ b/icrp/roles/ecs-agent/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ecs-agent \ No newline at end of file diff --git a/icrp/roles/ecs-agent/meta/main.yml b/icrp/roles/ecs-agent/meta/main.yml new file mode 100644 index 000000000..3a212a936 --- /dev/null +++ b/icrp/roles/ecs-agent/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - 
GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+ \ No newline at end of file diff --git a/icrp/roles/ecs-agent/tasks/main.yml b/icrp/roles/ecs-agent/tasks/main.yml new file mode 100644 index 000000000..585202f56 --- /dev/null +++ b/icrp/roles/ecs-agent/tasks/main.yml @@ -0,0 +1,93 @@ +--- +# tasks file for ecs-agent +- name: gather instance facts + ec2_instance_facts: + region: us-east-1 + filters: + "tag:Name": "bento-{{env}}-frontend" + "instance-state-name": running + "tag:Environment": "{{env}}" + register: frontend + +- name: set instance name + set_fact: + frontend_ip: "{{ frontend.instances[0].network_interfaces[0].private_ip_address }}" + +#set local routing +- name: set localhost routing + sysctl: + name: net.ipv4.conf.all.route_localnet + value: '1' + sysctl_set: yes + state: present + reload: yes + +- name: install iptables + yum: + name: + - iptables-services + state: present + +- name: start iptables service + service: + name: iptables + state: started + enabled: yes + +- name: configure ecs-agent routing + iptables: + table: nat + chain: PREROUTING + protocol: tcp + destination: 169.254.170.2 + destination_port: '80' + jump: DNAT + to_destination: 127.0.0.1:51679 + comment: configure nat + +- name: configure ecs-agent redirect + iptables: + table: nat + chain: OUTPUT + protocol: tcp + match: tcp + destination: 169.254.170.2 + destination_port: '80' + jump: REDIRECT + to_ports: '51679' + comment: Redirect web traffic to port 51679 + +- name: save iptables + command: service iptables save + args: + warn: false + +- name: reload iptables + command: service iptables reload + args: + warn: false + +- name: create ecs directory + file: + path: "{{item}}" + state: directory + loop: + - "/etc/ecs" + - "/var/log/ecs" + - "/var/lib/ecs/data" + +- name: copy file ecs.config to /etc/ecs/ecs.config + template: + src: ecs.config.j2 + dest: /etc/ecs/ecs.config + +- name: copy docker service to systemd directory + copy: + src: docker-container@ecs-agent.service + dest: 
/etc/systemd/system/docker-container@ecs-agent.service + +- name: enable and start docker-container@ecs-agent.service + service: + name: docker-container@ecs-agent.service + state: started + enabled: yes \ No newline at end of file diff --git a/icrp/roles/ecs-agent/templates/ecs.config.j2 b/icrp/roles/ecs-agent/templates/ecs.config.j2 new file mode 100644 index 000000000..d14ec15d1 --- /dev/null +++ b/icrp/roles/ecs-agent/templates/ecs.config.j2 @@ -0,0 +1,12 @@ +ECS_DATADIR=/data +ECS_ENABLE_TASK_IAM_ROLE=true +ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true +ECS_LOGFILE=/log/ecs-agent.log +ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs","syslog"] +ECS_LOGLEVEL=info +ECS_CLUSTER={{ecs_cluster_name}} +{% if ansible_default_ipv4.address == frontend_ip %} +ECS_INSTANCE_ATTRIBUTES={"role": "frontend"} +{% else %} +ECS_INSTANCE_ATTRIBUTES={"role": "backend"} +{% endif %} diff --git a/icrp/roles/ecs-agent/tests/inventory b/icrp/roles/ecs-agent/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/ecs-agent/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/ecs-agent/tests/test.yml b/icrp/roles/ecs-agent/tests/test.yml new file mode 100644 index 000000000..bd797d6fb --- /dev/null +++ b/icrp/roles/ecs-agent/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ecs-agent \ No newline at end of file diff --git a/icrp/roles/ecs-agent/vars/main.yml b/icrp/roles/ecs-agent/vars/main.yml new file mode 100644 index 000000000..c73e942d3 --- /dev/null +++ b/icrp/roles/ecs-agent/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ecs-agent \ No newline at end of file diff --git a/icrp/roles/jenkins/README.md b/icrp/roles/jenkins/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/jenkins/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. 
+ +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/jenkins/defaults/main.yml b/icrp/roles/jenkins/defaults/main.yml new file mode 100644 index 000000000..e848bd6a2 --- /dev/null +++ b/icrp/roles/jenkins/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for jenkins \ No newline at end of file diff --git a/icrp/roles/jenkins/files/jenkins.yaml b/icrp/roles/jenkins/files/jenkins.yaml new file mode 100644 index 000000000..5cdc2e3c8 --- /dev/null +++ b/icrp/roles/jenkins/files/jenkins.yaml @@ -0,0 +1,123 @@ +jenkins: + systemMessage: "This jenkins instance is for Bento program" + agentProtocols: + - "JNLP4-connect" + securityRealm: + local: + allowsSignup: false + users: + - id: jenkinsAdmin + password: ${jenkinsAdminPassword} + - id: bento-user + password: ${jenkinUserPassword} + authorizationStrategy: + roleBased: + roles: + global: + - name: "admin" + description: "Jenkins administrators" + permissions: + - "Overall/Administer" + assignments: + - "jenkinsAdmin" + + - name: "readonly" + description: "Read-only users" + permissions: + - "Overall/Read" + - "Job/Read" + assignments: + - "authenticated" + + items: + - name: "Bento" + description: "Jobs in Folder A, but not the folder itself" + pattern: "Bento/.*" + permissions: + - "Job/Configure" + - "Job/Build" + - "Job/Delete" + assignments: + - "bento-user" + + # globalMatrix: + # grantedPermissions: + # - "Overall/Read:authenticated" + # - "Job/Read:authenticated" + # - "View/Read:authenticated" + # - "Overall/Administer:authenticated" + crumbIssuer: "standard" + clouds: + - docker: + name: "docker" + dockerApi: + dockerHost: + uri: "tcp://${DOCKER_AGENT_IP}:2375" + templates: + - labelString: "cicd_microservice" + dockerTemplateBase: + image: "cbiitssrepo/cicd-microservice" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + remoteFs: "/home/jenkins" + connector: + attach: + user: "root" + instanceCapStr: "10" + + nodes: + - permanent: + name: "build-agent" + remoteFS: "./jenkins-agent-dir" + labelString: "build-agent" + 
launcher: + ssh: + credentialsId: server_ssh_key + host: ${DOCKER_AGENT_IP} + SshHostKeyVerificationStrategy: nonVerifyingKeyVerificationStrategy + +jobs: + - script: > + folder('Bento') + - script: > + pipelineJob('Bento/DeployBento') { + def repo = 'https://github.com/CBIIT/icdc-devops.git' + description("Bento pipeline Job") + definition { + cpsScm { + scm { + git { + remote { url(repo) } + branch('master') + scriptPath('jenkins/jobs/bento/Jenkinsfile') + extensions {} + } + } + } + } + } +tool: + jdk: + installations: + - name: jdk11 + home: /usr/lib/jvm/jre-11-openjdk + - name: Default + home: /usr/lib/jvm/jre-11-openjdk + maven: + installations: + - name: maven-3.6.1 + home: /usr/local/maven + - name: Default + home: /usr/local/maven +credentials: + system: + domainCredentials: + - credentials: + - basicSSHUserPrivateKey: + scope: GLOBAL + id: server_ssh_key + username: bento + description: "ssh key for connecting to server as bento" + privateKeySource: + directEntry: + privateKey: ${server_sshkey} diff --git a/icrp/roles/jenkins/handlers/main.yml b/icrp/roles/jenkins/handlers/main.yml new file mode 100644 index 000000000..c71405373 --- /dev/null +++ b/icrp/roles/jenkins/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for jenkins +- name: restart docker + service: + name: docker + state: restarted \ No newline at end of file diff --git a/icrp/roles/jenkins/meta/main.yml b/icrp/roles/jenkins/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/jenkins/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 
2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/icrp/roles/jenkins/tasks/main.yml b/icrp/roles/jenkins/tasks/main.yml new file mode 100644 index 000000000..87cd93482 --- /dev/null +++ b/icrp/roles/jenkins/tasks/main.yml @@ -0,0 +1,86 @@ +--- +# tasks file for jenkins +- name: open http and https services + firewalld: + service: "{{item}}" + zone: public + immediate: yes + permanent: yes + state: enabled + loop: + - http + - https + +- name: set fact + set_fact: + docker_agent_ip: "{{ansible_default_ipv4.address}}" + +- name: install openjdk11 for local slave + yum: + name: java-11-openjdk + state: latest + +- name: create file structure for the jenkins + file: + path: "{{ item }}" + state: directory + group: bento + owner: bento + loop: + - "{{ jenkins_home }}/jk_secrets" + - "{{ docker_home }}" + +- name: copy docker files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + group: bento + owner: bento + loop: + - {src: 'docker-compose.yml.j2',dest: '{{ docker_home }}/docker-compose.yml'} + - {src: 'dockerfile_jenkins.j2',dest: '{{docker_home}}/dockerfile_jenkins'} + - {src: 'plugins.txt.j2',dest: '{{docker_home}}/plugins.txt'} + - {src: 'jenkins.env.j2',dest: '{{docker_home}}/jenkins.env'} + +- name: copy conf files + copy: + src: jenkins.yaml + dest: "{{jenkins_home}}/jenkins.yaml" + owner: bento + group: bento + +- name: copy server_sshkey files + copy: + content: "{{server_sshkey_file}}" + dest: "{{jenkins_home}}/jk_secrets/server_sshkey" + +- name: add secret files + copy: + content: "{{docker_agent_ip}}" + dest: "{{jenkins_home}}/docker_agent_ip" + +- name: add secrets files + copy: + content: "{{ item.src }}" + dest: "{{jenkins_home}}/jk_secrets/{{ item.dest }}" + group: bento + owner: bento + loop: + - {src: "{{docker_agent_ip}}",dest: "docker_agent_ip"} + - {src: "{{jenkins_admin_password}}",dest: "jenkinsAdminPassword"} + - {src: "{{jenkins_user_password}}",dest: "jenkinsUserPassword"} + + +- name: build the docker image + docker_image: + path: "{{ 
docker_home }}" + name: bento/jenkins + dockerfile: "{{ dockerfile }}" + state: present + +- name: start the jenkins + command: docker-compose up -d + args: + chdir: "{{ docker_home }}" + + diff --git a/icrp/roles/jenkins/templates/docker-compose.yml.j2 b/icrp/roles/jenkins/templates/docker-compose.yml.j2 new file mode 100644 index 000000000..ba9b18bda --- /dev/null +++ b/icrp/roles/jenkins/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +version: '3.1' +services: + jenkins: + image: bento/jenkins + ports: + - 80:8080 + - 5001:5000 + volumes: + - {{jenkins_home}}:/var/jenkins_home + env_file: + - ./jenkins.env + environment: + - CASC_JENKINS_CONFIG=/var/jenkins_home/jenkins.yaml + - SECRETS=/var/jenkins_home/jk_secrets/ + restart: always diff --git a/icrp/roles/jenkins/templates/dockerfile_jenkins.j2 b/icrp/roles/jenkins/templates/dockerfile_jenkins.j2 new file mode 100644 index 000000000..d3d5c9bd0 --- /dev/null +++ b/icrp/roles/jenkins/templates/dockerfile_jenkins.j2 @@ -0,0 +1,7 @@ +FROM jenkins/jenkins + +ARG JAVA_OPTS +ENV JAVA_OPTS="-Djenkins.install.runSetupWizard=false ${JAVA_OPTS:-}" + +COPY plugins.txt /usr/share/jenkins/ref/plugins.txt +RUN xargs /usr/local/bin/install-plugins.sh < /usr/share/jenkins/ref/plugins.txt diff --git a/icrp/roles/jenkins/templates/jenkins.env.j2 b/icrp/roles/jenkins/templates/jenkins.env.j2 new file mode 100644 index 000000000..6fa06b4aa --- /dev/null +++ b/icrp/roles/jenkins/templates/jenkins.env.j2 @@ -0,0 +1,6 @@ +DOCKER_AGENT_IP={{ docker_agent_ip }} +jenkinsAdminPassword={{ jenkins_admin_password }} +jenkinsUserPassword={{ jenkins_user_password }} + + + diff --git a/icrp/roles/jenkins/templates/plugins.txt.j2 b/icrp/roles/jenkins/templates/plugins.txt.j2 new file mode 100644 index 000000000..4477fe760 --- /dev/null +++ b/icrp/roles/jenkins/templates/plugins.txt.j2 @@ -0,0 +1,151 @@ +ace-editor +analysis-core +analysis-model-api +ansible +ansicolor +antisamy-markup-formatter +apache-httpcomponents-client-4-api 
+authentication-tokens +blueocean +blueocean-autofavorite +blueocean-bitbucket-pipeline +blueocean-commons +blueocean-config +blueocean-core-js +blueocean-dashboard +blueocean-display-url +blueocean-events +blueocean-git-pipeline +blueocean-github-pipeline +blueocean-i18n +blueocean-jira +blueocean-jwt +blueocean-personalization +blueocean-pipeline-api-impl +blueocean-pipeline-editor +blueocean-pipeline-scm-api +blueocean-rest +blueocean-rest-impl +blueocean-web +bouncycastle-api +branch-api +build-environment +build-timeout +build-with-parameters +cloudbees-bitbucket-branch-source +cloudbees-folder +command-launcher +conditional-buildstep +configuration-as-code +configuration-as-code-groovy +copyartifact +credentials +credentials-binding +deploy +description-setter +display-url-api +docker-commons +docker-java-api +docker-plugin +docker-workflow +durable-task +email-ext +envinject +envinject-api +extended-choice-parameter +extensible-choice-parameter +favorite +filesystem-list-parameter-plugin +git +git-client +git-parameter +git-server +github +github-api +github-branch-source +github-oauth +github-organization-folder +gradle +handlebars +handy-uri-templates-2-api +htmlpublisher +jackson2-api +javadoc +jdk-tool +jenkins-design-language +job-dsl +jquery +jquery-detached +jquery-ui +jsch +junit +ldap +list-git-branches-parameter +lockable-resources +mailer +mapdb-api +matrix-auth +matrix-combinations-parameter +matrix-project +maven-plugin +mercurial +momentjs +multiple-scms +pam-auth +Parameterized-Remote-Trigger +parameterized-scheduler +parameterized-trigger +pipeline-build-step +pipeline-github-lib +pipeline-graph-analysis +pipeline-input-step +pipeline-milestone-step +pipeline-model-api +pipeline-model-declarative-agent +pipeline-model-definition +pipeline-model-extensions +pipeline-rest-api +pipeline-stage-step +pipeline-stage-tags-metadata +pipeline-stage-view +plain-credentials +preSCMbuildstep +pretested-integration +pubsub-light +purge-job-history +rbenv 
+rebuild +resource-disposer +role-strategy +ruby-runtime +run-condition +scm-api +script-security +seed +slack +sse-gateway +ssh-credentials +ssh-slaves +structs +subversion +text-finder +timestamper +token-macro +trilead-api +uno-choice +variant +warnings +warnings-ng +windows-slaves +workflow-aggregator +workflow-api +workflow-basic-steps +workflow-cps +workflow-cps-global-lib +workflow-durable-task-step +workflow-job +workflow-multibranch +workflow-scm-step +workflow-step-api +workflow-support +ws-cleanup \ No newline at end of file diff --git a/icrp/roles/jenkins/tests/inventory b/icrp/roles/jenkins/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/jenkins/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/jenkins/tests/test.yml b/icrp/roles/jenkins/tests/test.yml new file mode 100644 index 000000000..846c4ff17 --- /dev/null +++ b/icrp/roles/jenkins/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - jenkins \ No newline at end of file diff --git a/icrp/roles/jenkins/vars/main.yml b/icrp/roles/jenkins/vars/main.yml new file mode 100644 index 000000000..6f22ea20a --- /dev/null +++ b/icrp/roles/jenkins/vars/main.yml @@ -0,0 +1,10 @@ +--- +# vars file for jenkins +docker_home: "/local/content/docker" +jenkins_home: "/local/content/jenkins" +jenkins_yaml: "./config/icdc-jenkins.yaml" +server_sshkey_file: "{{ lookup('aws_ssm', 'server_sshkey_file', region='us-east-1' ) }}" +jenkins_admin_password: "{{ lookup('aws_ssm', 'jenkins_admin_password', region='us-east-1' ) }}" +dockerfile: "{{ docker_home }}/dockerfile_jenkins" +docker_compose_file: "{{ docker_home }}/docker-compose.yml" +jenkins_user_password: "{{ lookup('aws_ssm', 'jenkins_user_password', region='us-east-1' ) }}" diff --git a/icrp/roles/neo4j/README.md b/icrp/roles/neo4j/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/neo4j/README.md @@ -0,0 +1,38 @@ +Role Name 
+========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/neo4j/defaults/main.yml b/icrp/roles/neo4j/defaults/main.yml new file mode 100644 index 000000000..da4e33db3 --- /dev/null +++ b/icrp/roles/neo4j/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for neo4j +newrelic: yes +neo4j_home: /var/lib/neo4j +graphql_version: 3.5.0.4 diff --git a/icrp/roles/neo4j/handlers/main.yml b/icrp/roles/neo4j/handlers/main.yml new file mode 100644 index 000000000..dc497b8e9 --- /dev/null +++ b/icrp/roles/neo4j/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for neo4j +- name: restart neo4j + service: + name: neo4j + state: restarted \ No newline at end of file diff --git a/icrp/roles/neo4j/meta/main.yml b/icrp/roles/neo4j/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/neo4j/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/icrp/roles/neo4j/tasks/main.yml b/icrp/roles/neo4j/tasks/main.yml new file mode 100644 index 000000000..c8ff440c0 --- /dev/null +++ b/icrp/roles/neo4j/tasks/main.yml @@ -0,0 +1,101 @@ +--- +# tasks file for neo4j + +- name: install systems packages + yum: + name: + - firewalld + - epel-release + - unzip + state: latest + disable_gpg_check: yes + +- name: enable and start firewalld + service: + name: firewalld + state: started + enabled: yes + +- name: import neo4j gpg key + rpm_key: + state: present + key: https://debian.neo4j.org/neotechnology.gpg.key + +- name: add neo4j repository + yum_repository: + name: neo4j + description: neo4j repository + file: neo4j + baseurl: https://yum.neo4j.org/stable + gpgcheck: yes + enabled: yes + +- name: install wget + yum: + name: + - wget + +- name: install neo4j enterprise + shell: NEO4J_ACCEPT_LICENSE_AGREEMENT=yes yum -y install neo4j-enterprise + +- name: start and enable neo4j service + service: + name: neo4j + state: started + enabled: yes + +- name: open neo4j data ports + firewalld: + port: "{{item}}/tcp" + zone: public + immediate: yes + 
permanent: yes + state: enabled + loop: + - 7474 + - 7473 + - 7687 + +- name: download neo4j-graphql plugins + get_url: + url: https://github.com/neo4j-graphql/neo4j-graphql/releases/download/{{graphql_version}}/neo4j-graphql-{{graphql_version}}.jar + dest: "{{neo4j_home}}/plugins" + owner: neo4j + group: neo4j + +- name: create directories for ssl create + file: + path: "{{neo4j_home}}/certificates/{{item}}" + state: directory + owner: neo4j + group: neo4j + loop: + - bak + - trusted + - revoked + +- name: Copy self cert to bak + copy: + remote_src: True + src: "{{ item }}" + dest: "{{neo4j_home}}/certificates/bak" + owner: neo4j + group: neo4j + with_fileglob: + - "{{neo4j_home}}/certificates/neo4j*" + +- name: removed the old certs + file: + path: "{{ item }}" + state: absent + with_fileglob: + - "{{neo4j_home}}/certificates/neo4j*" + +- name: copy neo4j.conf to /etc/neo4j/neo4j.conf + template: + src: neo4j.conf.j2 + dest: /etc/neo4j/neo4j.conf + owner: neo4j + group: neo4j + notify: + - restart neo4j \ No newline at end of file diff --git a/icrp/roles/neo4j/templates/neo4j.conf.j2 b/icrp/roles/neo4j/templates/neo4j.conf.j2 new file mode 100644 index 000000000..3c1ed36fc --- /dev/null +++ b/icrp/roles/neo4j/templates/neo4j.conf.j2 @@ -0,0 +1,820 @@ +#***************************************************************** +# Neo4j configuration +# +# For more details and a complete list of settings, please see +# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/ +#***************************************************************** + +# The name of the database to mount. Note that this is *not* to be confused with +# the causal_clustering.database setting, used to specify a logical database +# name when creating a multi-clustering deployment. +#dbms.active_database=graph.db + +# Paths of directories in the installation. 
+dbms.directories.data=/var/lib/neo4j/data +dbms.directories.plugins=/var/lib/neo4j/plugins +dbms.directories.certificates=/var/lib/neo4j/certificates +dbms.directories.logs=/var/log/neo4j +dbms.directories.lib=/usr/share/neo4j/lib +dbms.directories.run=/var/run/neo4j +dbms.directories.metrics=/var/lib/neo4j/metrics + +# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to +# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the +# `LOAD CSV` section of the manual for details. +dbms.directories.import=/var/lib/neo4j/import + +# Whether requests to Neo4j are authenticated. +# To disable authentication, uncomment this line +#dbms.security.auth_enabled=false + +# Enable this to be able to upgrade a store from an older version. +dbms.allow_upgrade=true + +# Java Heap Size: by default the Java heap size is dynamically +# calculated based on available system resources. +# Uncomment these lines to set specific initial and maximum +# heap size. +#dbms.memory.heap.initial_size=512m +#dbms.memory.heap.max_size=512m + +# The amount of memory to use for mapping the store files, in bytes (or +# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g'). +# If Neo4j is running on a dedicated server, then it is generally recommended +# to leave about 2-4 gigabytes for the operating system, give the JVM enough +# heap to hold all your transaction state and query context, and then leave the +# rest for the page cache. +# The default page cache memory assumes the machine is dedicated to running +# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size. +#dbms.memory.pagecache.size=10g + +# Enable online backups to be taken from this database. +dbms.backup.enabled=true + +# By default the backup service will only listen on localhost. +# To enable remote backups you will have to bind to an external +# network interface (e.g. 
0.0.0.0 for all interfaces). +# The protocol running varies depending on deployment. In a Causal Clustering environment this is the +# same protocol that runs on causal_clustering.transaction_listen_address. +dbms.backup.address=0.0.0.0:6362 + +# Enable encryption on the backup service for CC instances (does not work for single-instance or HA clusters) +#dbms.backup.ssl_policy=backup + +#***************************************************************** +# Network connector configuration +#***************************************************************** + +# With default configuration Neo4j only accepts local connections. +# To accept non-local connections, uncomment this line: +dbms.connectors.default_listen_address=0.0.0.0 + +# You can also choose a specific network interface, and configure a non-default +# port for each connector, by setting their individual listen_address. + +# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or +# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for +# individual connectors below. +#dbms.connectors.default_advertised_address=localhost + +# You can also choose a specific advertised hostname or IP address, and +# configure an advertised port for each connector, by setting their +# individual advertised_address. + +# Bolt connector +dbms.connector.bolt.enabled=true +#dbms.connector.bolt.tls_level=OPTIONAL +#dbms.connector.bolt.listen_address=:7687 + +# HTTP Connector. There can be zero or one HTTP connectors. +dbms.connector.http.enabled=true +#dbms.connector.http.listen_address=:7474 + +# HTTPS Connector. There can be zero or one HTTPS connectors. +dbms.connector.https.enabled=true +#dbms.connector.https.listen_address=:7473 + +# Number of Neo4j worker threads. 
+#dbms.threads.worker_count= + +#***************************************************************** +# SSL system configuration +#***************************************************************** + +# Names of the SSL policies to be used for the respective components. + +# The legacy policy is a special policy which is not defined in +# the policy configuration section, but rather derives from +# dbms.directories.certificates and associated files +# (by default: neo4j.key and neo4j.cert). Its use will be deprecated. + +# The policies to be used for connectors. +# +# N.B: Note that a connector must be configured to support/require +# SSL/TLS for the policy to actually be utilized. +# +# see: dbms.connector.*.tls_level + +#bolt.ssl_policy=legacy +#https.ssl_policy=legacy + +# For a causal cluster the configuring of a policy mandates its use. + +#causal_clustering.ssl_policy= + +#***************************************************************** +# SSL policy configuration +#***************************************************************** + +# Each policy is configured under a separate namespace, e.g. +# dbms.ssl.policy..* +# +# The example settings below are for a new policy named 'default'. + +# The base directory for cryptographic objects. Each policy will by +# default look for its associated objects (keys, certificates, ...) +# under the base directory. +# +# Every such setting can be overridden using a full path to +# the respective object, but every policy will by default look +# for cryptographic objects in its base location. +# +# Mandatory setting + +#dbms.ssl.policy.default.base_directory=certificates/default + +# Allows the generation of a fresh private key and a self-signed +# certificate if none are found in the expected locations. It is +# recommended to turn this off again after keys have been generated. +# +# Keys should in general be generated and distributed offline +# by a trusted certificate authority (CA) and not by utilizing +# this mode. 
+ +#dbms.ssl.policy.default.allow_key_generation=false + +# Enabling this makes it so that this policy ignores the contents +# of the trusted_dir and simply resorts to trusting everything. +# +# Use of this mode is discouraged. It would offer encryption but no security. + +#dbms.ssl.policy.default.trust_all=false + +# The private key for the default SSL policy. By default a file +# named private.key is expected under the base directory of the policy. +# It is mandatory that a key can be found or generated. + +#dbms.ssl.policy.default.private_key= + +# The private key for the default SSL policy. By default a file +# named public.crt is expected under the base directory of the policy. +# It is mandatory that a certificate can be found or generated. + +#dbms.ssl.policy.default.public_certificate= + +# The certificates of trusted parties. By default a directory named +# 'trusted' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). +# +# To enforce client authentication client_auth must be set to 'require'! + +#dbms.ssl.policy.default.trusted_dir= + +# Certificate Revocation Lists (CRLs). By default a directory named +# 'revoked' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). + +#dbms.ssl.policy.default.revoked_dir= + +# Client authentication setting. Values: none, optional, require +# The default is to require client authentication. +# +# Servers are always authenticated unless explicitly overridden +# using the trust_all setting. In a mutual authentication setup this +# should be kept at the default of require and trusted certificates +# must be installed in the trusted_dir. + +#dbms.ssl.policy.default.client_auth=require + +# It is possible to verify the hostname that the client uses +# to connect to the remote server. 
In order for this to work, the server public +# certificate must have a valid CN and/or matching Subject Alternative Names. + +# Note that this is irrelevant on host side connections (sockets receiving +# connections). + +# To enable hostname verification client side on nodes, set this to true. + +#dbms.ssl.policy.default.verify_hostname=false + +# A comma-separated list of allowed TLS versions. +# By default only TLSv1.2 is allowed. + +#dbms.ssl.policy.default.tls_versions= + +# A comma-separated list of allowed ciphers. +# The default ciphers are the defaults of the JVM platform. + +#dbms.ssl.policy.default.ciphers= + +#***************************************************************** +# Logging configuration +#***************************************************************** + +# To enable HTTP logging, uncomment this line +#dbms.logs.http.enabled=true + +# Number of HTTP logs to keep. +#dbms.logs.http.rotation.keep_number=5 + +# Size of each HTTP log that is kept. +#dbms.logs.http.rotation.size=20m + +# To enable GC Logging, uncomment this line +#dbms.logs.gc.enabled=true + +# GC Logging Options +# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information. +#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution + +# For Java 9 and newer GC Logging Options +# see https://docs.oracle.com/javase/10/tools/java.htm#JSWOR-GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5 +#dbms.logs.gc.options=-Xlog:gc*,safepoint,age*=trace + +# Number of GC logs to keep. +#dbms.logs.gc.rotation.keep_number=5 + +# Size of each GC log that is kept. +#dbms.logs.gc.rotation.size=20m + +# Log level for the debug log. One of DEBUG, INFO, WARN and ERROR. Be aware that logging at DEBUG level can be very verbose. +#dbms.logs.debug.level=INFO + +# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. 
Accepts a binary suffix "k", +# "m" or "g". +#dbms.logs.debug.rotation.size=20m + +# Maximum number of history files for the internal log. +#dbms.logs.debug.rotation.keep_number=7 + +# Log executed queries that takes longer than the configured threshold. Enable by uncommenting this line. +#dbms.logs.query.enabled=true + +# If the execution of query takes more time than this threshold, the query is logged. If set to zero then all queries +# are logged. +#dbms.logs.query.threshold=0 + +# The file size in bytes at which the query log will auto-rotate. If set to zero then no rotation will occur. Accepts a +# binary suffix "k", "m" or "g". +#dbms.logs.query.rotation.size=20m + +# Maximum number of history files for the query log. +#dbms.logs.query.rotation.keep_number=7 + +# Include parameters for the executed queries being logged (this is enabled by default). +#dbms.logs.query.parameter_logging_enabled=true + +# Uncomment this line to include detailed time information for the executed queries being logged: +#dbms.logs.query.time_logging_enabled=true + +# Uncomment this line to include bytes allocated by the executed queries being logged: +#dbms.logs.query.allocation_logging_enabled=true + +# Uncomment this line to include page hits and page faults information for the executed queries being logged: +#dbms.logs.query.page_logging_enabled=true + +# The security log is always enabled when `dbms.security.auth_enabled=true`, and resides in `logs/security.log`. + +# Log level for the security log. One of DEBUG, INFO, WARN and ERROR. +#dbms.logs.security.level=INFO + +# Threshold for rotation of the security log. +#dbms.logs.security.rotation.size=20m + +# Minimum time interval after last rotation of the security log before it may be rotated again. +#dbms.logs.security.rotation.delay=300s + +# Maximum number of history files for the security log. 
+#dbms.logs.security.rotation.keep_number=7 + +#***************************************************************** +# Causal Clustering Configuration +#***************************************************************** + +# Uncomment and specify these lines for running Neo4j in Causal Clustering mode. +# See the Causal Clustering documentation at https://neo4j.com/docs/ for details. + +# Database mode +# Allowed values: +# CORE - Core member of the cluster, part of the consensus quorum. +# READ_REPLICA - Read replica in the cluster, an eventually-consistent read-only instance of the database. +# To operate this Neo4j instance in Causal Clustering mode as a core member, uncomment this line: +#dbms.mode=CORE + +# Expected number of Core servers in the cluster at formation +#causal_clustering.minimum_core_cluster_size_at_formation=3 + +# Minimum expected number of Core servers in the cluster at runtime. +#causal_clustering.minimum_core_cluster_size_at_runtime=3 + +# A comma-separated list of the address and port for which to reach all other members of the cluster. It must be in the +# host:port format. For each machine in the cluster, the address will usually be the public ip address of that machine. +# The port will be the value used in the setting "causal_clustering.discovery_listen_address". +#causal_clustering.initial_discovery_members=localhost:5000,localhost:5001,localhost:5002 + +# Host and port to bind the cluster member discovery management communication. +# This is the setting to add to the collection of address in causal_clustering.initial_core_cluster_members. +# Use 0.0.0.0 to bind to any network interface on the machine. If you want to only use a specific interface +# (such as a private ip address on AWS, for example) then use that ip address instead. +# If you don't know what value to use here, use this machines ip address. +#causal_clustering.discovery_listen_address=:5000 + +# Network interface and port for the transaction shipping server to listen on. 
+# Please note that it is also possible to run the backup client against this port so always limit access to it via the +# firewall and configure an ssl policy. If you want to allow for messages to be read from +# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address +# (such as a private ip on AWS, for example) then use that ip address instead. +# If you don't know what value to use here, use this machines ip address. +#causal_clustering.transaction_listen_address=:6000 + +# Network interface and port for the RAFT server to listen on. If you want to allow for messages to be read from +# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address +# (such as a private ip on AWS, for example) then use that ip address instead. +# If you don't know what value to use here, use this machines ip address. +#causal_clustering.raft_listen_address=:7000 + +# List a set of names for groups to which this server should belong. This +# is a comma-separated list and names should only use alphanumericals +# and underscore. This can be used to identify groups of servers in the +# configuration for load balancing and replication policies. +# +# The main intention for this is to group servers, but it is possible to specify +# a unique identifier here as well which might be useful for troubleshooting +# or other special purposes. +#causal_clustering.server_groups= + +#***************************************************************** +# Causal Clustering Load Balancing +#***************************************************************** + +# N.B: Read the online documentation for a thorough explanation! + +# Selects the load balancing plugin that shall be enabled. 
+#causal_clustering.load_balancing.plugin=server_policies + +####### Examples for "server_policies" plugin ####### + +# Will select all available servers as the default policy, which is the +# policy used when the client does not specify a policy preference. The +# default configuration for the default policy is all(). +#causal_clustering.load_balancing.config.server_policies.default=all() + +# Will select servers in groups 'group1' or 'group2' under the default policy. +#causal_clustering.load_balancing.config.server_policies.default=groups(group1,group2) + +# Slightly more advanced example: +# Will select servers in 'group1', 'group2' or 'group3', but only if there are at least 2. +# This policy will be exposed under the name of 'mypolicy'. +#causal_clustering.load_balancing.config.server_policies.mypolicy=groups(group1,group2,group3) -> min(2) + +# Below will create an even more advanced policy named 'regionA' consisting of several rules +# yielding the following behaviour: +# +# select servers in regionA, if at least 2 are available +# otherwise: select servers in regionA and regionB, if at least 2 are available +# otherwise: select all servers +# +# The intention is to create a policy for a particular region which prefers +# a certain set of local servers, but which will fallback to other regions +# or all available servers as required. +# +# N.B: The following configuration uses the line-continuation character \ +# which allows you to construct an easily readable rule set spanning +# several lines. +# +#causal_clustering.load_balancing.config.server_policies.policyA=\ +#groups(regionA) -> min(2);\ +#groups(regionA,regionB) -> min(2); + +# Note that implicitly the last fallback is to always consider all() servers, +# but this can be prevented by specifying a halt() as the last rule. 
+# +#causal_clustering.load_balancing.config.server_policies.regionA_only=\ +#groups(regionA);\ +#halt(); + +#***************************************************************** +# Causal Clustering Additional Configuration Options +#***************************************************************** +# The following settings are used less frequently. +# If you don't know what these are, you don't need to change these from their default values. + +# The name of the database being hosted by this server instance. This +# configuration setting may be safely ignored unless deploying a multicluster. +# Instances may be allocated to constituent clusters by assigning them +# distinct database names using this setting. For instance if you had 6 +# instances you could form 2 clusters by assigning half the database name +# "foo", half the name "bar". The setting value must match exactly between +# members of the same cluster. This setting is a one-off: once an instance +# is configured with a database name it may not be changed in future without +# using `neo4j-admin unbind`. +#causal_clustering.database=default + +# Address and port that this machine advertises that it's RAFT server is listening at. Should be a +# specific network address. If you are unsure about what value to use here, use this machine's ip address. +#causal_clustering.raft_advertised_address=:7000 + +# Address and port that this machine advertises that it's transaction shipping server is listening at. Should be a +# specific network address. If you are unsure about what value to use here, use this machine's ip address. +#causal_clustering.transaction_advertised_address=:6000 + +# The time limit within which a new leader election will occur if no messages from the current leader are received. +# Larger values allow for more stable leaders at the expense of longer unavailability times in case of leader +# failures. 
+#causal_clustering.leader_election_timeout=7s + +# The time limit allowed for a new member to attempt to update its data to match the rest of the cluster. +#causal_clustering.join_catch_up_timeout=10m + +# The size of the batch for streaming entries to other machines while trying to catch up another machine. +#causal_clustering.catchup_batch_size=64 + +# When to pause sending entries to other machines and allow them to catch up. +#causal_clustering.log_shipping_max_lag=256 + +# Raft log pruning frequency. +#causal_clustering.raft_log_pruning_frequency=10m + +# The size to allow the raft log to grow before rotating. +#causal_clustering.raft_log_rotation_size=250M + +### The following setting is relevant for Edge servers only. +# The interval of pulling updates from Core servers. +#causal_clustering.pull_interval=1s + +# For how long should drivers cache the discovery data from +# the dbms.cluster.routing.getServers() procedure. Defaults to 300s. +#causal_clustering.cluster_routing_ttl=300s + +#***************************************************************** +# HA configuration +#***************************************************************** + +# Uncomment and specify these lines for running Neo4j in High Availability mode. +# See the High Availability documentation at https://neo4j.com/docs/ for details. + +# Database mode +# Allowed values: +# HA - High Availability +# SINGLE - Single mode, default. +# To run in High Availability mode uncomment this line: +#dbms.mode=HA + +# ha.server_id is the number of each instance in the HA cluster. It should be +# an integer (e.g. 1), and should be unique for each cluster instance. +#ha.server_id= + +# ha.initial_hosts is a comma-separated list (without spaces) of the host:port +# where the ha.host.coordination of all instances will be listening. Typically +# this will be the same for all cluster instances. 
+#ha.initial_hosts=127.0.0.1:5001,127.0.0.1:5002,127.0.0.1:5003 + +# IP and port for this instance to listen on, for communicating cluster status +# information with other instances (also see ha.initial_hosts). The IP +# must be the configured IP address for one of the local interfaces. +#ha.host.coordination=127.0.0.1:5001 + +# IP and port for this instance to listen on, for communicating transaction +# data with other instances (also see ha.initial_hosts). The IP +# must be the configured IP address for one of the local interfaces. +#ha.host.data=127.0.0.1:6001 + +# The interval, in seconds, at which slaves will pull updates from the master. You must comment out +# the option to disable periodic pulling of updates. +#ha.pull_interval=10 + +# Amount of slaves the master will try to push a transaction to upon commit +# (default is 1). The master will optimistically continue and not fail the +# transaction even if it fails to reach the push factor. Setting this to 0 will +# increase write performance when writing through master but could potentially +# lead to branched data (or loss of transaction) if the master goes down. +#ha.tx_push_factor=1 + +# Strategy the master will use when pushing data to slaves (if the push factor +# is greater than 0). There are three options available "fixed_ascending" (default), +# "fixed_descending" or "round_robin". Fixed strategies will start by pushing to +# slaves ordered by server id (accordingly with qualifier) and are useful when +# planning for a stable fail-over based on ids. +#ha.tx_push_strategy=fixed_ascending + +# Policy for how to handle branched data. +#ha.branched_data_policy=keep_all + +# How often heartbeat messages should be sent. Defaults to ha.default_timeout. +#ha.heartbeat_interval=5s + +# How long to wait for heartbeats from other instances before marking them as suspects for failure. 
+# This value reflects considerations of network latency, expected duration of garbage collection pauses +# and other factors that can delay message sending and processing. Larger values will result in more +# stable masters but also will result in longer waits before a failover in case of master failure. +# This value should not be set to less than twice the ha.heartbeat_interval value otherwise there is a high +# risk of frequent master switches and possibly branched data occurrence. +#ha.heartbeat_timeout=40s + +# If you are using a load-balancer that doesn't support HTTP Auth, you may need to turn off authentication for the +# HA HTTP status endpoint by uncommenting the following line. +#dbms.security.ha_status_auth_enabled=false + +# Whether this instance should only participate as slave in cluster. If set to +# true, it will never be elected as master. +#ha.slave_only=false + +#******************************************************************** +# Security Configuration +#******************************************************************** + +# The authentication and authorization provider that contains both users and roles. +# This can be one of the built-in `native` or `ldap` auth providers, +# or it can be an externally provided plugin, with a custom name prefixed by `plugin`, +# i.e. `plugin-`. +#dbms.security.auth_provider=native + +# The time to live (TTL) for cached authentication and authorization info when using +# external auth providers (LDAP or plugin). Setting the TTL to 0 will +# disable auth caching. +#dbms.security.auth_cache_ttl=10m + +# The maximum capacity for authentication and authorization caches (respectively). +#dbms.security.auth_cache_max_capacity=10000 + +# Set to log successful authentication events to the security log. 
+# If this is set to `false` only failed authentication events will be logged, which +# could be useful if you find that the successful events spam the logs too much, +# and you do not require full auditing capability. +#dbms.security.log_successful_authentication=true + +#================================================ +# LDAP Auth Provider Configuration +#================================================ + +# URL of LDAP server to use for authentication and authorization. +# The format of the setting is `://:`, where hostname is the only required field. +# The supported values for protocol are `ldap` (default) and `ldaps`. +# The default port for `ldap` is 389 and for `ldaps` 636. +# For example: `ldaps://ldap.example.com:10389`. +# +# NOTE: You may want to consider using STARTTLS (`dbms.security.ldap.use_starttls`) instead of LDAPS +# for secure connections, in which case the correct protocol is `ldap`. +#dbms.security.ldap.host=localhost + +# Use secure communication with the LDAP server using opportunistic TLS. +# First an initial insecure connection will be made with the LDAP server, and then a STARTTLS command +# will be issued to negotiate an upgrade of the connection to TLS before initiating authentication. +#dbms.security.ldap.use_starttls=false + +# The LDAP referral behavior when creating a connection. This is one of `follow`, `ignore` or `throw`. +# `follow` automatically follows any referrals +# `ignore` ignores any referrals +# `throw` throws an exception, which will lead to authentication failure +#dbms.security.ldap.referral=follow + +# The timeout for establishing an LDAP connection. If a connection with the LDAP server cannot be +# established within the given time the attempt is aborted. +# A value of 0 means to use the network protocol's (i.e., TCP's) timeout value. +#dbms.security.ldap.connection_timeout=30s + +# The timeout for an LDAP read request (i.e. search). 
If the LDAP server does not respond within +# the given time the request will be aborted. A value of 0 means wait for a response indefinitely. +#dbms.security.ldap.read_timeout=30s + +#---------------------------------- +# LDAP Authentication Configuration +#---------------------------------- + +# LDAP authentication mechanism. This is one of `simple` or a SASL mechanism supported by JNDI, +# for example `DIGEST-MD5`. `simple` is basic username +# and password authentication and SASL is used for more advanced mechanisms. See RFC 2251 LDAPv3 +# documentation for more details. +#dbms.security.ldap.authentication.mechanism=simple + +# LDAP user DN template. An LDAP object is referenced by its distinguished name (DN), and a user DN is +# an LDAP fully-qualified unique user identifier. This setting is used to generate an LDAP DN that +# conforms with the LDAP directory's schema from the user principal that is submitted with the +# authentication token when logging in. +# The special token {0} is a placeholder where the user principal will be substituted into the DN string. +#dbms.security.ldap.authentication.user_dn_template=uid={0},ou=users,dc=example,dc=com + +# Determines if the result of authentication via the LDAP server should be cached or not. +# Caching is used to limit the number of LDAP requests that have to be made over the network +# for users that have already been authenticated successfully. A user can be authenticated against +# an existing cache entry (instead of via an LDAP server) as long as it is alive +# (see `dbms.security.auth_cache_ttl`). +# An important consequence of setting this to `true` is that +# Neo4j then needs to cache a hashed version of the credentials in order to perform credentials +# matching. This hashing is done using a cryptographic hash function together with a random salt. 
+# Preferably a conscious decision should be made if this method is considered acceptable by +# the security standards of the organization in which this Neo4j instance is deployed. +#dbms.security.ldap.authentication.cache_enabled=true + +#---------------------------------- +# LDAP Authorization Configuration +#---------------------------------- +# Authorization is performed by searching the directory for the groups that +# the user is a member of, and then map those groups to Neo4j roles. + +# Perform LDAP search for authorization info using a system account instead of the user's own account. +# +# If this is set to `false` (default), the search for group membership will be performed +# directly after authentication using the LDAP context bound with the user's own account. +# The mapped roles will be cached for the duration of `dbms.security.auth_cache_ttl`, +# and then expire, requiring re-authentication. To avoid frequently having to re-authenticate +# sessions you may want to set a relatively long auth cache expiration time together with this option. +# NOTE: This option will only work if the users are permitted to search for their +# own group membership attributes in the directory. +# +# If this is set to `true`, the search will be performed using a special system account user +# with read access to all the users in the directory. +# You need to specify the username and password using the settings +# `dbms.security.ldap.authorization.system_username` and +# `dbms.security.ldap.authorization.system_password` with this option. +# Note that this account only needs read access to the relevant parts of the LDAP directory +# and does not need to have access rights to Neo4j, or any other systems. +#dbms.security.ldap.authorization.use_system_account=false + +# An LDAP system account username to use for authorization searches when +# `dbms.security.ldap.authorization.use_system_account` is `true`. 
+# Note that the `dbms.security.ldap.authentication.user_dn_template` will not be applied to this username, +# so you may have to specify a full DN. +#dbms.security.ldap.authorization.system_username= + +# An LDAP system account password to use for authorization searches when +# `dbms.security.ldap.authorization.use_system_account` is `true`. +#dbms.security.ldap.authorization.system_password= + +# The name of the base object or named context to search for user objects when LDAP authorization is enabled. +# A common case is that this matches the last part of `dbms.security.ldap.authentication.user_dn_template`. +#dbms.security.ldap.authorization.user_search_base=ou=users,dc=example,dc=com + +# The LDAP search filter to search for a user principal when LDAP authorization is +# enabled. The filter should contain the placeholder token {0} which will be substituted for the +# user principal. +#dbms.security.ldap.authorization.user_search_filter=(&(objectClass=*)(uid={0})) + +# A list of attribute names on a user object that contains groups to be used for mapping to roles +# when LDAP authorization is enabled. +#dbms.security.ldap.authorization.group_membership_attributes=memberOf + +# An authorization mapping from LDAP group names to Neo4j role names. +# The map should be formatted as a semicolon separated list of key-value pairs, where the +# key is the LDAP group name and the value is a comma separated list of corresponding role names. 
+# For example: group1=role1;group2=role2;group3=role3,role4,role5 +# +# You could also use whitespaces and quotes around group names to make this mapping more readable, +# for example: dbms.security.ldap.authorization.group_to_role_mapping=\ +# "cn=Neo4j Read Only,cn=users,dc=example,dc=com" = reader; \ +# "cn=Neo4j Read-Write,cn=users,dc=example,dc=com" = publisher; \ +# "cn=Neo4j Schema Manager,cn=users,dc=example,dc=com" = architect; \ +# "cn=Neo4j Administrator,cn=users,dc=example,dc=com" = admin +#dbms.security.ldap.authorization.group_to_role_mapping= + + +#***************************************************************** +# Miscellaneous configuration +#***************************************************************** + +# Enable this to specify a parser other than the default one. +#cypher.default_language_version=2.3 + +# Determines if Cypher will allow using file URLs when loading data using +# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV` +# clauses that load data from the file system. +#dbms.security.allow_csv_import_from_file_urls=true + +# Retention policy for transaction logs needed to perform recovery and backups. +#dbms.tx_log.rotation.retention_policy=7 days + +# Limit the number of IOs the background checkpoint process will consume per second. +# This setting is advisory, is ignored in Neo4j Community Edition, and is followed to +# best effort in Enterprise Edition. +# An IO is in this case a 8 KiB (mostly sequential) write. Limiting the write IO in +# this way will leave more bandwidth in the IO subsystem to service random-read IOs, +# which is important for the response time of queries when the database cannot fit +# entirely in memory. The only drawback of this setting is that longer checkpoint times +# may lead to slightly longer recovery times in case of a database or system crash. +# A lower number means lower IO pressure, and consequently longer checkpoint times. 
+# The configuration can also be commented out to remove the limitation entirely, and +# let the checkpointer flush data as fast as the hardware will go. +# Set this to -1 to disable the IOPS limit. +# dbms.checkpoint.iops.limit=300 + +# Only allow read operations from this Neo4j instance. This mode still requires +# write access to the directory for lock purposes. +#dbms.read_only=false + +# Comma separated list of JAX-RS packages containing JAX-RS resources, one +# package name for each mountpoint. The listed package names will be loaded +# under the mountpoints specified. Uncomment this line to mount the +# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from +# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of +# http://localhost:7474/examples/unmanaged/helloworld/{nodeId} +#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged + +# A comma separated list of procedures and user defined functions that are allowed +# full access to the database through unsupported/insecure internal APIs. +#dbms.security.procedures.unrestricted=my.extensions.example,my.procedures.* + +# A comma separated list of procedures to be loaded by default. +# Leaving this unconfigured will load all procedures found. +#dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.* + +# Specified comma separated list of id types (like node or relationship) that should be reused. +# When some type is specified database will try to reuse corresponding ids as soon as it will be safe to do so. +# Currently only 'node' and 'relationship' types are supported. +# This settings is ignored in Neo4j Community Edition. +#dbms.ids.reuse.types.override=node,relationship + +#******************************************************************** +# JVM Parameters +#******************************************************************** + +# G1GC generally strikes a good balance between throughput and tail +# latency, without too much tuning. 
+dbms.jvm.additional=-XX:+UseG1GC + +# Have common exceptions keep producing stack traces, so they can be +# debugged regardless of how often logs are rotated. +dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow + +# Make sure that `initmemory` is not only allocated, but committed to +# the process, before starting the database. This reduces memory +# fragmentation, increasing the effectiveness of transparent huge +# pages. It also reduces the possibility of seeing performance drop +# due to heap-growing GC events, where a decrease in available page +# cache leads to an increase in mean IO response time. +# Try reducing the heap memory, if this flag degrades performance. +dbms.jvm.additional=-XX:+AlwaysPreTouch + +# Trust that non-static final fields are really final. +# This allows more optimizations and improves overall performance. +# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or +# serialization to change the value of final fields! +dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions +dbms.jvm.additional=-XX:+TrustFinalNonStaticFields + +# Disable explicit garbage collection, which is occasionally invoked by the JDK itself. +dbms.jvm.additional=-XX:+DisableExplicitGC + +# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and +# jmx.password files are required. +# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords, +# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'. +# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html +# On Unix based systems the jmx.password file needs to be owned by the user that will run the server, +# and have permissions set to 0600. 
+# For details on setting these file permissions on Windows see: +# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637 +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access + +# Some systems cannot discover host name automatically, and need this line configured: +#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME + +# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes. +# This is to protect the server from any potential passive eavesdropping. +dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048 + +# This mitigates a DDoS vector. +dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +# This filter prevents deserialization of arbitrary objects via java object serialization, addressing potential vulnerabilities. +# By default this filter whitelists all neo4j classes, as well as classes from the hazelcast library and the java standard library. +# These defaults should only be modified by expert users! +# For more details (including filter syntax) see: https://openjdk.java.net/jeps/290 +#dbms.jvm.additional=-Djdk.serialFilter=java.**;org.neo4j.**;com.neo4j.**;com.hazelcast.**;net.sf.ehcache.Element;com.sun.proxy.*;org.openjdk.jmh.**;!* + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. 
+# Please uninstall the service before modifying this section. The +# service can then be reinstalled. + +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=rpm +dbms.unmanaged_extension_classes=org.neo4j.graphql=/graphql +metrics.enabled=true +metrics.neo4j.enabled=true +metrics.neo4j.tx.enabled=true +metrics.neo4j.pagecache.enabled=true + diff --git a/icrp/roles/neo4j/templates/neo4j.conf.j2.bak b/icrp/roles/neo4j/templates/neo4j.conf.j2.bak new file mode 100644 index 000000000..faa235325 --- /dev/null +++ b/icrp/roles/neo4j/templates/neo4j.conf.j2.bak @@ -0,0 +1,816 @@ + +#***************************************************************** +# Neo4j configuration +# +# For more details and a complete list of settings, please see +# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/ +#***************************************************************** + +# The name of the database to mount. Note that this is *not* to be confused with +# the causal_clustering.database setting, used to specify a logical database +# name when creating a multi-clustering deployment. +#dbms.active_database=graph.db + +# Paths of directories in the installation. +dbms.unmanaged_extension_classes=org.neo4j.graphql=/graphql +dbms.directories.plugins=/var/lib/neo4j/plugins +dbms.directories.certificates=/var/lib/neo4j/certificates +dbms.directories.logs=/var/log/neo4j +dbms.directories.lib=/usr/share/neo4j/lib +dbms.directories.run=/var/run/neo4j +dbms.directories.metrics=/var/lib/neo4j/metrics + +# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to +# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. 
See the +# `LOAD CSV` section of the manual for details. +dbms.directories.import=/var/lib/neo4j/import + +# Whether requests to Neo4j are authenticated. +# To disable authentication, uncomment this line +#dbms.security.auth_enabled=false + +# Enable this to be able to upgrade a store from an older version. +#dbms.allow_upgrade=true + +# Java Heap Size: by default the Java heap size is dynamically +# calculated based on available system resources. +# Uncomment these lines to set specific initial and maximum +# heap size. +dbms.memory.heap.initial_size={{heap_min_size}} +dbms.memory.heap.max_size={{heap_max_size}} + +# The amount of memory to use for mapping the store files, in bytes (or +# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g'). +# If Neo4j is running on a dedicated server, then it is generally recommended +# to leave about 2-4 gigabytes for the operating system, give the JVM enough +# heap to hold all your transaction state and query context, and then leave the +# rest for the page cache. +# The default page cache memory assumes the machine is dedicated to running +# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size. +#dbms.memory.pagecache.size=10g + +# Enable online backups to be taken from this database. +dbms.backup.enabled=true + +# By default the backup service will only listen on localhost. +# To enable remote backups you will have to bind to an external +# network interface (e.g. 0.0.0.0 for all interfaces). +# The protocol running varies depending on deployment. In a Causal Clustering environment this is the +# same protocol that runs on causal_clustering.transaction_listen_address. 
+#dbms.backup.address=0.0.0.0:6362 + +# Enable encryption on the backup service for CC instances (does not work for single-instance or HA clusters) +#dbms.backup.ssl_policy=backup + +#***************************************************************** +# Network connector configuration +#***************************************************************** + +# With default configuration Neo4j only accepts local connections. +# To accept non-local connections, uncomment this line: +dbms.connectors.default_listen_address=0.0.0.0 + +# You can also choose a specific network interface, and configure a non-default +# port for each connector, by setting their individual listen_address. + +# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or +# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for +# individual connectors below. +#dbms.connectors.default_advertised_address=localhost + +# You can also choose a specific advertised hostname or IP address, and +# configure an advertised port for each connector, by setting their +# individual advertised_address. + +# Bolt connector +dbms.connector.bolt.enabled=true +#dbms.connector.bolt.tls_level=OPTIONAL +#dbms.connector.bolt.listen_address=:7687 + +# HTTP Connector. There can be zero or one HTTP connectors. +dbms.connector.http.enabled=true +#dbms.connector.http.listen_address=:7474 + +# HTTPS Connector. There can be zero or one HTTPS connectors. +dbms.connector.https.enabled=true +#dbms.connector.https.listen_address=:7473 + +# Number of Neo4j worker threads. +#dbms.threads.worker_count= + +#***************************************************************** +# SSL system configuration +#***************************************************************** + +# Names of the SSL policies to be used for the respective components. 
+ +# The legacy policy is a special policy which is not defined in +# the policy configuration section, but rather derives from +# dbms.directories.certificates and associated files +# (by default: neo4j.key and neo4j.cert). Its use will be deprecated. + +# The policies to be used for connectors. +# +# N.B: Note that a connector must be configured to support/require +# SSL/TLS for the policy to actually be utilized. +# +# see: dbms.connector.*.tls_level + +#bolt.ssl_policy=legacy +#https.ssl_policy=legacy + +# For a causal cluster the configuring of a policy mandates its use. + +#causal_clustering.ssl_policy= + +#***************************************************************** +# SSL policy configuration +#***************************************************************** + +# Each policy is configured under a separate namespace, e.g. +# dbms.ssl.policy..* +# +# The example settings below are for a new policy named 'default'. + +# The base directory for cryptographic objects. Each policy will by +# default look for its associated objects (keys, certificates, ...) +# under the base directory. +# +# Every such setting can be overridden using a full path to +# the respective object, but every policy will by default look +# for cryptographic objects in its base location. +# +# Mandatory setting + +#dbms.ssl.policy.default.base_directory=certificates/default + +# Allows the generation of a fresh private key and a self-signed +# certificate if none are found in the expected locations. It is +# recommended to turn this off again after keys have been generated. +# +# Keys should in general be generated and distributed offline +# by a trusted certificate authority (CA) and not by utilizing +# this mode. + +#dbms.ssl.policy.default.allow_key_generation=false + +# Enabling this makes it so that this policy ignores the contents +# of the trusted_dir and simply resorts to trusting everything. +# +# Use of this mode is discouraged. It would offer encryption but no security. 
+ +#dbms.ssl.policy.default.trust_all=false + +# The private key for the default SSL policy. By default a file +# named private.key is expected under the base directory of the policy. +# It is mandatory that a key can be found or generated. + +#dbms.ssl.policy.default.private_key= + +# The private key for the default SSL policy. By default a file +# named public.crt is expected under the base directory of the policy. +# It is mandatory that a certificate can be found or generated. + +#dbms.ssl.policy.default.public_certificate= + +# The certificates of trusted parties. By default a directory named +# 'trusted' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). +# +# To enforce client authentication client_auth must be set to 'require'! + +#dbms.ssl.policy.default.trusted_dir= + +# Certificate Revocation Lists (CRLs). By default a directory named +# 'revoked' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). + +#dbms.ssl.policy.default.revoked_dir= + +# Client authentication setting. Values: none, optional, require +# The default is to require client authentication. +# +# Servers are always authenticated unless explicitly overridden +# using the trust_all setting. In a mutual authentication setup this +# should be kept at the default of require and trusted certificates +# must be installed in the trusted_dir. + +#dbms.ssl.policy.default.client_auth=require + +# It is possible to verify the hostname that the client uses +# to connect to the remote server. In order for this to work, the server public +# certificate must have a valid CN and/or matching Subject Alternative Names. + +# Note that this is irrelevant on host side connections (sockets receiving +# connections). 
+ +# To enable hostname verification client side on nodes, set this to true. + +#dbms.ssl.policy.default.verify_hostname=false + +# A comma-separated list of allowed TLS versions. +# By default only TLSv1.2 is allowed. + +#dbms.ssl.policy.default.tls_versions= + +# A comma-separated list of allowed ciphers. +# The default ciphers are the defaults of the JVM platform. + +#dbms.ssl.policy.default.ciphers= + +#***************************************************************** +# Logging configuration +#***************************************************************** + +# To enable HTTP logging, uncomment this line +#dbms.logs.http.enabled=true + +# Number of HTTP logs to keep. +#dbms.logs.http.rotation.keep_number=5 + +# Size of each HTTP log that is kept. +#dbms.logs.http.rotation.size=20m + +# To enable GC Logging, uncomment this line +#dbms.logs.gc.enabled=true + +# GC Logging Options +# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information. +#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution + +# For Java 9 and newer GC Logging Options +# see https://docs.oracle.com/javase/10/tools/java.htm#JSWOR-GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5 +#dbms.logs.gc.options=-Xlog:gc*,safepoint,age*=trace + +# Number of GC logs to keep. +#dbms.logs.gc.rotation.keep_number=5 + +# Size of each GC log that is kept. +#dbms.logs.gc.rotation.size=20m + +# Log level for the debug log. One of DEBUG, INFO, WARN and ERROR. Be aware that logging at DEBUG level can be very verbose. +#dbms.logs.debug.level=INFO + +# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k", +# "m" or "g". +#dbms.logs.debug.rotation.size=20m + +# Maximum number of history files for the internal log. 
+#dbms.logs.debug.rotation.keep_number=7 + +# Log executed queries that takes longer than the configured threshold. Enable by uncommenting this line. +dbms.logs.query.enabled=true + +# If the execution of query takes more time than this threshold, the query is logged. If set to zero then all queries +# are logged. +dbms.logs.query.threshold=0 + +# The file size in bytes at which the query log will auto-rotate. If set to zero then no rotation will occur. Accepts a +# binary suffix "k", "m" or "g". +dbms.logs.query.rotation.size=20m + +# Maximum number of history files for the query log. +dbms.logs.query.rotation.keep_number=7 + +# Include parameters for the executed queries being logged (this is enabled by default). +dbms.logs.query.parameter_logging_enabled=true + +# Uncomment this line to include detailed time information for the executed queries being logged: +dbms.logs.query.time_logging_enabled=true + +# Uncomment this line to include bytes allocated by the executed queries being logged: +#dbms.logs.query.allocation_logging_enabled=true + +# Uncomment this line to include page hits and page faults information for the executed queries being logged: +dbms.logs.query.page_logging_enabled=true + +# The security log is always enabled when `dbms.security.auth_enabled=true`, and resides in `logs/security.log`. + +# Log level for the security log. One of DEBUG, INFO, WARN and ERROR. +dbms.logs.security.level=INFO + +# Threshold for rotation of the security log. +#dbms.logs.security.rotation.size=20m + +# Minimum time interval after last rotation of the security log before it may be rotated again. +#dbms.logs.security.rotation.delay=300s + +# Maximum number of history files for the security log. 
+#dbms.logs.security.rotation.keep_number=7 + +#***************************************************************** +# Causal Clustering Configuration +#***************************************************************** + +# Uncomment and specify these lines for running Neo4j in Causal Clustering mode. +# See the Causal Clustering documentation at https://neo4j.com/docs/ for details. + +# Database mode +# Allowed values: +# CORE - Core member of the cluster, part of the consensus quorum. +# READ_REPLICA - Read replica in the cluster, an eventually-consistent read-only instance of the database. +# To operate this Neo4j instance in Causal Clustering mode as a core member, uncomment this line: +#dbms.mode=CORE + +# Expected number of Core servers in the cluster at formation +#causal_clustering.minimum_core_cluster_size_at_formation=3 + +# Minimum expected number of Core servers in the cluster at runtime. +#causal_clustering.minimum_core_cluster_size_at_runtime=3 + +# A comma-separated list of the address and port for which to reach all other members of the cluster. It must be in the +# host:port format. For each machine in the cluster, the address will usually be the public ip address of that machine. +# The port will be the value used in the setting "causal_clustering.discovery_listen_address". +#causal_clustering.initial_discovery_members=localhost:5000,localhost:5001,localhost:5002 + +# Host and port to bind the cluster member discovery management communication. +# This is the setting to add to the collection of address in causal_clustering.initial_core_cluster_members. +# Use 0.0.0.0 to bind to any network interface on the machine. If you want to only use a specific interface +# (such as a private ip address on AWS, for example) then use that ip address instead. +# If you don't know what value to use here, use this machines ip address. +#causal_clustering.discovery_listen_address=:5000 + +# Network interface and port for the transaction shipping server to listen on. 
+# Please note that it is also possible to run the backup client against this port so always limit access to it via the
+# firewall and configure an ssl policy. If you want to allow for messages to be read from
+# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address
+# (such as a private ip on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.transaction_listen_address=:6000
+
+# Network interface and port for the RAFT server to listen on. If you want to allow for messages to be read from
+# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address
+# (such as a private ip on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.raft_listen_address=:7000
+
+# List a set of names for groups to which this server should belong. This
+# is a comma-separated list and names should only use alphanumericals
+# and underscore. This can be used to identify groups of servers in the
+# configuration for load balancing and replication policies.
+#
+# The main intention for this is to group servers, but it is possible to specify
+# a unique identifier here as well which might be useful for troubleshooting
+# or other special purposes.
+#causal_clustering.server_groups=
+
+#*****************************************************************
+# Causal Clustering Load Balancing
+#*****************************************************************
+
+# N.B: Read the online documentation for a thorough explanation!
+
+# Selects the load balancing plugin that shall be enabled.
+#causal_clustering.load_balancing.plugin=server_policies + +####### Examples for "server_policies" plugin ####### + +# Will select all available servers as the default policy, which is the +# policy used when the client does not specify a policy preference. The +# default configuration for the default policy is all(). +#causal_clustering.load_balancing.config.server_policies.default=all() + +# Will select servers in groups 'group1' or 'group2' under the default policy. +#causal_clustering.load_balancing.config.server_policies.default=groups(group1,group2) + +# Slightly more advanced example: +# Will select servers in 'group1', 'group2' or 'group3', but only if there are at least 2. +# This policy will be exposed under the name of 'mypolicy'. +#causal_clustering.load_balancing.config.server_policies.mypolicy=groups(group1,group2,group3) -> min(2) + +# Below will create an even more advanced policy named 'regionA' consisting of several rules +# yielding the following behaviour: +# +# select servers in regionA, if at least 2 are available +# otherwise: select servers in regionA and regionB, if at least 2 are available +# otherwise: select all servers +# +# The intention is to create a policy for a particular region which prefers +# a certain set of local servers, but which will fallback to other regions +# or all available servers as required. +# +# N.B: The following configuration uses the line-continuation character \ +# which allows you to construct an easily readable rule set spanning +# several lines. +# +#causal_clustering.load_balancing.config.server_policies.policyA=\ +#groups(regionA) -> min(2);\ +#groups(regionA,regionB) -> min(2); + +# Note that implicitly the last fallback is to always consider all() servers, +# but this can be prevented by specifying a halt() as the last rule. 
+#
+#causal_clustering.load_balancing.config.server_policies.regionA_only=\
+#groups(regionA);\
+#halt();
+
+#*****************************************************************
+# Causal Clustering Additional Configuration Options
+#*****************************************************************
+# The following settings are used less frequently.
+# If you don't know what these are, you don't need to change these from their default values.
+
+# The name of the database being hosted by this server instance. This
+# configuration setting may be safely ignored unless deploying a multicluster.
+# Instances may be allocated to constituent clusters by assigning them
+# distinct database names using this setting. For instance if you had 6
+# instances you could form 2 clusters by assigning half the database name
+# "foo", half the name "bar". The setting value must match exactly between
+# members of the same cluster. This setting is a one-off: once an instance
+# is configured with a database name it may not be changed in future without
+# using `neo4j-admin unbind`.
+#causal_clustering.database=default
+
+# Address and port that this machine advertises that its RAFT server is listening at. Should be a
+# specific network address. If you are unsure about what value to use here, use this machine's ip address.
+#causal_clustering.raft_advertised_address=:7000
+
+# Address and port that this machine advertises that its transaction shipping server is listening at. Should be a
+# specific network address. If you are unsure about what value to use here, use this machine's ip address.
+#causal_clustering.transaction_advertised_address=:6000
+
+# The time limit within which a new leader election will occur if no messages from the current leader are received.
+# Larger values allow for more stable leaders at the expense of longer unavailability times in case of leader
+# failures.
+#causal_clustering.leader_election_timeout=7s
+
+# The time limit allowed for a new member to attempt to update its data to match the rest of the cluster.
+#causal_clustering.join_catch_up_timeout=10m
+
+# The size of the batch for streaming entries to other machines while trying to catch up another machine.
+#causal_clustering.catchup_batch_size=64
+
+# When to pause sending entries to other machines and allow them to catch up.
+#causal_clustering.log_shipping_max_lag=256
+
+# Raft log pruning frequency.
+#causal_clustering.raft_log_pruning_frequency=10m
+
+# The size to allow the raft log to grow before rotating.
+#causal_clustering.raft_log_rotation_size=250M
+
+### The following setting is relevant for Edge servers only.
+# The interval of pulling updates from Core servers.
+#causal_clustering.pull_interval=1s
+
+# For how long should drivers cache the discovery data from
+# the dbms.cluster.routing.getServers() procedure. Defaults to 300s.
+#causal_clustering.cluster_routing_ttl=300s
+
+#*****************************************************************
+# HA configuration
+#*****************************************************************
+
+# Uncomment and specify these lines for running Neo4j in High Availability mode.
+# See the High Availability documentation at https://neo4j.com/docs/ for details.
+
+# Database mode
+# Allowed values:
+# HA - High Availability
+# SINGLE - Single mode, default.
+# To run in High Availability mode uncomment this line:
+#dbms.mode=HA
+
+# ha.server_id is the number of each instance in the HA cluster. It should be
+# an integer (e.g. 1), and should be unique for each cluster instance.
+#ha.server_id=
+
+# ha.initial_hosts is a comma-separated list (without spaces) of the host:port
+# where the ha.host.coordination of all instances will be listening. Typically
+# this will be the same for all cluster instances.
+#ha.initial_hosts=127.0.0.1:5001,127.0.0.1:5002,127.0.0.1:5003 + +# IP and port for this instance to listen on, for communicating cluster status +# information with other instances (also see ha.initial_hosts). The IP +# must be the configured IP address for one of the local interfaces. +#ha.host.coordination=127.0.0.1:5001 + +# IP and port for this instance to listen on, for communicating transaction +# data with other instances (also see ha.initial_hosts). The IP +# must be the configured IP address for one of the local interfaces. +#ha.host.data=127.0.0.1:6001 + +# The interval, in seconds, at which slaves will pull updates from the master. You must comment out +# the option to disable periodic pulling of updates. +#ha.pull_interval=10 + +# Amount of slaves the master will try to push a transaction to upon commit +# (default is 1). The master will optimistically continue and not fail the +# transaction even if it fails to reach the push factor. Setting this to 0 will +# increase write performance when writing through master but could potentially +# lead to branched data (or loss of transaction) if the master goes down. +#ha.tx_push_factor=1 + +# Strategy the master will use when pushing data to slaves (if the push factor +# is greater than 0). There are three options available "fixed_ascending" (default), +# "fixed_descending" or "round_robin". Fixed strategies will start by pushing to +# slaves ordered by server id (accordingly with qualifier) and are useful when +# planning for a stable fail-over based on ids. +#ha.tx_push_strategy=fixed_ascending + +# Policy for how to handle branched data. +#ha.branched_data_policy=keep_all + +# How often heartbeat messages should be sent. Defaults to ha.default_timeout. +#ha.heartbeat_interval=5s + +# How long to wait for heartbeats from other instances before marking them as suspects for failure. 
+# This value reflects considerations of network latency, expected duration of garbage collection pauses +# and other factors that can delay message sending and processing. Larger values will result in more +# stable masters but also will result in longer waits before a failover in case of master failure. +# This value should not be set to less than twice the ha.heartbeat_interval value otherwise there is a high +# risk of frequent master switches and possibly branched data occurrence. +#ha.heartbeat_timeout=40s + +# If you are using a load-balancer that doesn't support HTTP Auth, you may need to turn off authentication for the +# HA HTTP status endpoint by uncommenting the following line. +#dbms.security.ha_status_auth_enabled=false + +# Whether this instance should only participate as slave in cluster. If set to +# true, it will never be elected as master. +#ha.slave_only=false + +#******************************************************************** +# Security Configuration +#******************************************************************** + +# The authentication and authorization provider that contains both users and roles. +# This can be one of the built-in `native` or `ldap` auth providers, +# or it can be an externally provided plugin, with a custom name prefixed by `plugin`, +# i.e. `plugin-`. +#dbms.security.auth_provider=native + +# The time to live (TTL) for cached authentication and authorization info when using +# external auth providers (LDAP or plugin). Setting the TTL to 0 will +# disable auth caching. +#dbms.security.auth_cache_ttl=10m + +# The maximum capacity for authentication and authorization caches (respectively). +#dbms.security.auth_cache_max_capacity=10000 + +# Set to log successful authentication events to the security log. 
+# If this is set to `false` only failed authentication events will be logged, which +# could be useful if you find that the successful events spam the logs too much, +# and you do not require full auditing capability. +#dbms.security.log_successful_authentication=true + +#================================================ +# LDAP Auth Provider Configuration +#================================================ + +# URL of LDAP server to use for authentication and authorization. +# The format of the setting is `://:`, where hostname is the only required field. +# The supported values for protocol are `ldap` (default) and `ldaps`. +# The default port for `ldap` is 389 and for `ldaps` 636. +# For example: `ldaps://ldap.example.com:10389`. +# +# NOTE: You may want to consider using STARTTLS (`dbms.security.ldap.use_starttls`) instead of LDAPS +# for secure connections, in which case the correct protocol is `ldap`. +#dbms.security.ldap.host=localhost + +# Use secure communication with the LDAP server using opportunistic TLS. +# First an initial insecure connection will be made with the LDAP server, and then a STARTTLS command +# will be issued to negotiate an upgrade of the connection to TLS before initiating authentication. +#dbms.security.ldap.use_starttls=false + +# The LDAP referral behavior when creating a connection. This is one of `follow`, `ignore` or `throw`. +# `follow` automatically follows any referrals +# `ignore` ignores any referrals +# `throw` throws an exception, which will lead to authentication failure +#dbms.security.ldap.referral=follow + +# The timeout for establishing an LDAP connection. If a connection with the LDAP server cannot be +# established within the given time the attempt is aborted. +# A value of 0 means to use the network protocol's (i.e., TCP's) timeout value. +#dbms.security.ldap.connection_timeout=30s + +# The timeout for an LDAP read request (i.e. search). 
If the LDAP server does not respond within +# the given time the request will be aborted. A value of 0 means wait for a response indefinitely. +#dbms.security.ldap.read_timeout=30s + +#---------------------------------- +# LDAP Authentication Configuration +#---------------------------------- + +# LDAP authentication mechanism. This is one of `simple` or a SASL mechanism supported by JNDI, +# for example `DIGEST-MD5`. `simple` is basic username +# and password authentication and SASL is used for more advanced mechanisms. See RFC 2251 LDAPv3 +# documentation for more details. +#dbms.security.ldap.authentication.mechanism=simple + +# LDAP user DN template. An LDAP object is referenced by its distinguished name (DN), and a user DN is +# an LDAP fully-qualified unique user identifier. This setting is used to generate an LDAP DN that +# conforms with the LDAP directory's schema from the user principal that is submitted with the +# authentication token when logging in. +# The special token {0} is a placeholder where the user principal will be substituted into the DN string. +#dbms.security.ldap.authentication.user_dn_template=uid={0},ou=users,dc=example,dc=com + +# Determines if the result of authentication via the LDAP server should be cached or not. +# Caching is used to limit the number of LDAP requests that have to be made over the network +# for users that have already been authenticated successfully. A user can be authenticated against +# an existing cache entry (instead of via an LDAP server) as long as it is alive +# (see `dbms.security.auth_cache_ttl`). +# An important consequence of setting this to `true` is that +# Neo4j then needs to cache a hashed version of the credentials in order to perform credentials +# matching. This hashing is done using a cryptographic hash function together with a random salt. 
+# Preferably a conscious decision should be made if this method is considered acceptable by +# the security standards of the organization in which this Neo4j instance is deployed. +#dbms.security.ldap.authentication.cache_enabled=true + +#---------------------------------- +# LDAP Authorization Configuration +#---------------------------------- +# Authorization is performed by searching the directory for the groups that +# the user is a member of, and then map those groups to Neo4j roles. + +# Perform LDAP search for authorization info using a system account instead of the user's own account. +# +# If this is set to `false` (default), the search for group membership will be performed +# directly after authentication using the LDAP context bound with the user's own account. +# The mapped roles will be cached for the duration of `dbms.security.auth_cache_ttl`, +# and then expire, requiring re-authentication. To avoid frequently having to re-authenticate +# sessions you may want to set a relatively long auth cache expiration time together with this option. +# NOTE: This option will only work if the users are permitted to search for their +# own group membership attributes in the directory. +# +# If this is set to `true`, the search will be performed using a special system account user +# with read access to all the users in the directory. +# You need to specify the username and password using the settings +# `dbms.security.ldap.authorization.system_username` and +# `dbms.security.ldap.authorization.system_password` with this option. +# Note that this account only needs read access to the relevant parts of the LDAP directory +# and does not need to have access rights to Neo4j, or any other systems. +#dbms.security.ldap.authorization.use_system_account=false + +# An LDAP system account username to use for authorization searches when +# `dbms.security.ldap.authorization.use_system_account` is `true`. 
+# Note that the `dbms.security.ldap.authentication.user_dn_template` will not be applied to this username, +# so you may have to specify a full DN. +#dbms.security.ldap.authorization.system_username= + +# An LDAP system account password to use for authorization searches when +# `dbms.security.ldap.authorization.use_system_account` is `true`. +#dbms.security.ldap.authorization.system_password= + +# The name of the base object or named context to search for user objects when LDAP authorization is enabled. +# A common case is that this matches the last part of `dbms.security.ldap.authentication.user_dn_template`. +#dbms.security.ldap.authorization.user_search_base=ou=users,dc=example,dc=com + +# The LDAP search filter to search for a user principal when LDAP authorization is +# enabled. The filter should contain the placeholder token {0} which will be substituted for the +# user principal. +#dbms.security.ldap.authorization.user_search_filter=(&(objectClass=*)(uid={0})) + +# A list of attribute names on a user object that contains groups to be used for mapping to roles +# when LDAP authorization is enabled. +#dbms.security.ldap.authorization.group_membership_attributes=memberOf + +# An authorization mapping from LDAP group names to Neo4j role names. +# The map should be formatted as a semicolon separated list of key-value pairs, where the +# key is the LDAP group name and the value is a comma separated list of corresponding role names. 
+# For example: group1=role1;group2=role2;group3=role3,role4,role5 +# +# You could also use whitespaces and quotes around group names to make this mapping more readable, +# for example: dbms.security.ldap.authorization.group_to_role_mapping=\ +# "cn=Neo4j Read Only,cn=users,dc=example,dc=com" = reader; \ +# "cn=Neo4j Read-Write,cn=users,dc=example,dc=com" = publisher; \ +# "cn=Neo4j Schema Manager,cn=users,dc=example,dc=com" = architect; \ +# "cn=Neo4j Administrator,cn=users,dc=example,dc=com" = admin +#dbms.security.ldap.authorization.group_to_role_mapping= + + +#***************************************************************** +# Miscellaneous configuration +#***************************************************************** + +# Enable this to specify a parser other than the default one. +#cypher.default_language_version=3.0 + +# Determines if Cypher will allow using file URLs when loading data using +# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV` +# clauses that load data from the file system. +#dbms.security.allow_csv_import_from_file_urls=true + +# Retention policy for transaction logs needed to perform recovery and backups. +#dbms.tx_log.rotation.retention_policy=7 days + +# Limit the number of IOs the background checkpoint process will consume per second. +# This setting is advisory, is ignored in Neo4j Community Edition, and is followed to +# best effort in Enterprise Edition. +# An IO is in this case a 8 KiB (mostly sequential) write. Limiting the write IO in +# this way will leave more bandwidth in the IO subsystem to service random-read IOs, +# which is important for the response time of queries when the database cannot fit +# entirely in memory. The only drawback of this setting is that longer checkpoint times +# may lead to slightly longer recovery times in case of a database or system crash. +# A lower number means lower IO pressure, and consequently longer checkpoint times. 
+# The configuration can also be commented out to remove the limitation entirely, and +# let the checkpointer flush data as fast as the hardware will go. +# Set this to -1 to disable the IOPS limit. +# dbms.checkpoint.iops.limit=300 + +# Only allow read operations from this Neo4j instance. This mode still requires +# write access to the directory for lock purposes. +#dbms.read_only=false + +# Comma separated list of JAX-RS packages containing JAX-RS resources, one +# package name for each mountpoint. The listed package names will be loaded +# under the mountpoints specified. Uncomment this line to mount the +# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from +# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of +# http://localhost:7474/examples/unmanaged/helloworld/{nodeId} +#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged + +# A comma separated list of procedures and user defined functions that are allowed +# full access to the database through unsupported/insecure internal APIs. +#dbms.security.procedures.unrestricted=my.extensions.example,my.procedures.* + +# A comma separated list of procedures to be loaded by default. +# Leaving this unconfigured will load all procedures found. +#dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.* + +# Specified comma separated list of id types (like node or relationship) that should be reused. +# When some type is specified database will try to reuse corresponding ids as soon as it will be safe to do so. +# Currently only 'node' and 'relationship' types are supported. +# This settings is ignored in Neo4j Community Edition. +#dbms.ids.reuse.types.override=node,relationship + +#******************************************************************** +# JVM Parameters +#******************************************************************** + +# G1GC generally strikes a good balance between throughput and tail +# latency, without too much tuning. 
+dbms.jvm.additional=-XX:+UseG1GC + +# Have common exceptions keep producing stack traces, so they can be +# debugged regardless of how often logs are rotated. +dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow + +# Make sure that `initmemory` is not only allocated, but committed to +# the process, before starting the database. This reduces memory +# fragmentation, increasing the effectiveness of transparent huge +# pages. It also reduces the possibility of seeing performance drop +# due to heap-growing GC events, where a decrease in available page +# cache leads to an increase in mean IO response time. +# Try reducing the heap memory, if this flag degrades performance. +dbms.jvm.additional=-XX:+AlwaysPreTouch + +# Trust that non-static final fields are really final. +# This allows more optimizations and improves overall performance. +# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or +# serialization to change the value of final fields! +dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions +dbms.jvm.additional=-XX:+TrustFinalNonStaticFields + +# Disable explicit garbage collection, which is occasionally invoked by the JDK itself. +dbms.jvm.additional=-XX:+DisableExplicitGC + +# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and +# jmx.password files are required. +# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords, +# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'. +# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html +# On Unix based systems the jmx.password file needs to be owned by the user that will run the server, +# and have permissions set to 0600. 
+# For details on setting these file permissions on Windows see: +# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637 +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access + +# Some systems cannot discover host name automatically, and need this line configured: +#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME + +# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes. +# This is to protect the server from any potential passive eavesdropping. +dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048 + +# This mitigates a DDoS vector. +dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. +# Please uninstall the service before modifying this section. The +# service can then be reinstalled. 
+ +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=rpm +dbms.jvm.additional=-javaagent:/var/lib/neo4j/newrelic/newrelic.jar +metrics.enabled=true +metrics.neo4j.enabled=true +metrics.neo4j.tx.enabled=true +metrics.neo4j.pagecache.enabled=true +metrics.neo4j.counts.enabled=true +metrics.neo4j.network.enabled=true \ No newline at end of file diff --git a/icrp/roles/neo4j/tests/inventory b/icrp/roles/neo4j/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/neo4j/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/neo4j/tests/test.yml b/icrp/roles/neo4j/tests/test.yml new file mode 100644 index 000000000..ba5c658c4 --- /dev/null +++ b/icrp/roles/neo4j/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - neo4j \ No newline at end of file diff --git a/icrp/roles/neo4j/vars/main.yml b/icrp/roles/neo4j/vars/main.yml new file mode 100644 index 000000000..76d6bdb95 --- /dev/null +++ b/icrp/roles/neo4j/vars/main.yml @@ -0,0 +1,9 @@ +--- +# vars file for neo4j +collector_name: "{{ env }}-neo4j" +additional_logs: + - name: "{{ env }} Neo4j Logs" + description: "{{ env }} neo4j logs" + category: "{{env }}/db/neo4j" + path: "/var/log/neo4j/*.log" + filters: "" \ No newline at end of file diff --git a/icrp/roles/newrelic/README.md b/icrp/roles/newrelic/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/newrelic/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/newrelic/defaults/main.yml b/icrp/roles/newrelic/defaults/main.yml new file mode 100644 index 000000000..4e801fb8e --- /dev/null +++ b/icrp/roles/newrelic/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for roles/newrelic diff --git a/icrp/roles/newrelic/handlers/main.yml b/icrp/roles/newrelic/handlers/main.yml new file mode 100644 index 000000000..4223f4e46 --- /dev/null +++ b/icrp/roles/newrelic/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for roles/newrelic +- name: restart newrelic-infra + service: + name: newrelic-infra + state: restarted \ No newline at end of file diff --git a/icrp/roles/newrelic/meta/main.yml b/icrp/roles/newrelic/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/newrelic/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/icrp/roles/newrelic/tasks/main.yml b/icrp/roles/newrelic/tasks/main.yml new file mode 100644 index 000000000..99e70c338 --- /dev/null +++ b/icrp/roles/newrelic/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: add newrelic-infra gpg key + rpm_key: + state: present + key: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg + +- name: setup newrelic repo + yum_repository: + name: newrelic-infra + description: Newrelic infrastruture repository + baseurl: https://download.newrelic.com/infrastructure_agent/linux/yum/el/7/x86_64/ + +- name: install newrelic-infra + package: + name: + - libcap + - newrelic-infra + state: installed + environment: + NRIA_MODE: PRIVILEGED + +- name: copy newrelic config file to /etc/ + template: + src: newrelic-infra.yml.j2 + dest: /etc/newrelic-infra.yml + +- name: enable and start newrelic-infra service + service: + name: newrelic-infra + state: started + enabled: yes + diff --git a/icrp/roles/newrelic/templates/newrelic-infra.yml.j2 b/icrp/roles/newrelic/templates/newrelic-infra.yml.j2 new file mode 100644 index 000000000..16f1d80b3 --- 
/dev/null +++ b/icrp/roles/newrelic/templates/newrelic-infra.yml.j2 @@ -0,0 +1,6 @@ +license_key: {{ newrelic_license_key }} +log_file: /var/log/newrelic-infra/newrelic-infra.log +display_name: bento-{{platform}}-{{app}}-{{env}} +collector_url: https://gov-infra-api.newrelic.com +identity_url: https://gov-identity-api.newrelic.com +command_channel_url: https://gov-infrastructure-command-api.newrelic.com diff --git a/icrp/roles/newrelic/tests/inventory b/icrp/roles/newrelic/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/newrelic/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/newrelic/tests/test.yml b/icrp/roles/newrelic/tests/test.yml new file mode 100644 index 000000000..b9b489f3f --- /dev/null +++ b/icrp/roles/newrelic/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - roles/newrelic \ No newline at end of file diff --git a/icrp/roles/newrelic/vars/main.yml b/icrp/roles/newrelic/vars/main.yml new file mode 100644 index 000000000..04bfd637d --- /dev/null +++ b/icrp/roles/newrelic/vars/main.yml @@ -0,0 +1,7 @@ +--- +# vars file for roles/newrelic +newrelic_license_key: "{{ lookup('aws_ssm', 'newrelic_license_key', region='us-east-1' ) }}" +hostname: "{{ hostname }}" +env: "{{env}}" +app: "{{app}}" +platform: "{{platform}}" \ No newline at end of file diff --git a/icrp/roles/sumologic/README.md b/icrp/roles/sumologic/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/icrp/roles/sumologic/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/icrp/roles/sumologic/defaults/main.yml b/icrp/roles/sumologic/defaults/main.yml new file mode 100644 index 000000000..ceba8dc85 --- /dev/null +++ b/icrp/roles/sumologic/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for sumologic +timezone: 'Etc/EST' +additional_logs: [] +config: /opt/SumoCollector/config +sumo_docker_version: 1.0.3 \ No newline at end of file diff --git a/icrp/roles/sumologic/handlers/main.yml b/icrp/roles/sumologic/handlers/main.yml new file mode 100644 index 000000000..be869c71d --- /dev/null +++ b/icrp/roles/sumologic/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for sumologic +- name: restart collector + service: + name: collector + state: restarted \ No newline at end of file diff --git a/icrp/roles/sumologic/meta/main.yml b/icrp/roles/sumologic/meta/main.yml new file mode 100644 index 000000000..5d50bf41b --- /dev/null +++ b/icrp/roles/sumologic/meta/main.yml @@ -0,0 +1,60 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.4 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. 
+ #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/icrp/roles/sumologic/tasks/main.yml b/icrp/roles/sumologic/tasks/main.yml new file mode 100644 index 000000000..78b844739 --- /dev/null +++ b/icrp/roles/sumologic/tasks/main.yml @@ -0,0 +1,38 @@ +--- +# tasks file for sumologic +- name: check if sumologic is installed + yum: + list: 'SumoCollector' + register: sumo_installed + +- name: download sumologic rpm + get_url: + url: https://collectors.sumologic.com/rest/download/rpm/64 + dest: /tmp/sumologic.rpm + when: sumo_installed.results == [] + +- name: Install SumoCollector + yum: + name: '/tmp/sumologic.rpm' + state: installed + when: sumo_installed.results == [] + +- name: copy user.properties and source configuration + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + group: sumologic_collector + with_items: + - {src: 'sources.json.j2',dest: '{{config}}/sources.json'} + - {src: 'user.properties.j2',dest: '{{config}}/user.properties'} + notify: restart collector + +# - name: install sumologic docker plugin +# 
command: docker plugin install store/sumologic/docker-logging-driver:{{sumo_docker_version}} --alias sumologic --grant-all-permissions +# - name: Start service +# service: +# name: collector +# state: started +# enabled: yes + + diff --git a/icrp/roles/sumologic/templates/sources.json.j2 b/icrp/roles/sumologic/templates/sources.json.j2 new file mode 100644 index 000000000..6f62bb29c --- /dev/null +++ b/icrp/roles/sumologic/templates/sources.json.j2 @@ -0,0 +1,68 @@ +{ + "api.version": "v1", + "sources": [ + { + "name": "Linux Secure Log", + "description": "Security events and user logins", + "category": "{{env}}/OS/Linux/secure", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/secure*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux Message Log", + "description": "System events, such as user creation, deletion, system start, shutdown, etc", + "category": "{{env}}/OS/Linux/message", + "automaticDateParsing": false, + "multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/messages*", + "blacklist": [], + "sourceType": "LocalFile" + }, + { + "name": "Linux dmesg", + "description": "Kernel messages", + "category": "{{env}}/OS/Linux/dmesg", + "automaticDateParsing": false, + "multilineProcessingEnabled": true, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [], + "encoding": "UTF-8", + "pathExpression": "/var/log/dmesg", + "blacklist": [], + "sourceType": "LocalFile" + } +{% for log in additional_logs %} + , + { + "name": "{{ log.name }}", + "description": "{{ log.description }}", + "category": "{{ log.category }}", + "automaticDateParsing": false, + 
"multilineProcessingEnabled": false, + "useAutolineMatching": false, + "forceTimeZone": true, + "timeZone": "{{ timezone }}", + "filters": [{{ log.filters }}], + "encoding": "UTF-8", + "pathExpression": "{{ log.path }}", + "blacklist": [], + "sourceType": "LocalFile" + } +{% endfor %} + ] +} \ No newline at end of file diff --git a/icrp/roles/sumologic/templates/user.properties.j2 b/icrp/roles/sumologic/templates/user.properties.j2 new file mode 100644 index 000000000..009548740 --- /dev/null +++ b/icrp/roles/sumologic/templates/user.properties.j2 @@ -0,0 +1,6 @@ +name = {{ collector_name }} +accessid = {{ sumo_access_id }} +accesskey = {{ sumo_access_key }} +ephemeral = true +syncSources = /opt/SumoCollector/config/sources.json +skipAccessKeyRemoval = true diff --git a/icrp/roles/sumologic/tests/inventory b/icrp/roles/sumologic/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/icrp/roles/sumologic/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/icrp/roles/sumologic/tests/test.yml b/icrp/roles/sumologic/tests/test.yml new file mode 100644 index 000000000..9049b5d4a --- /dev/null +++ b/icrp/roles/sumologic/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - sumologic \ No newline at end of file diff --git a/icrp/roles/sumologic/vars/main.yml b/icrp/roles/sumologic/vars/main.yml new file mode 100644 index 000000000..23e7727b3 --- /dev/null +++ b/icrp/roles/sumologic/vars/main.yml @@ -0,0 +1,11 @@ +--- +# vars file for sumologic +sumo_access_key: "{{ lookup('aws_ssm', 'sumo_access_key', region='us-east-1' ) }}" +sumo_access_id: "{{ lookup('aws_ssm', 'sumo_access_id', region='us-east-1' ) }}" +collector_name: bento-{{platform}}-{{app}}-{{env}} +env: "{{env}}" +app: "{{app}}" +platform: "{{platform}}" + + + diff --git a/jenkins/jobs/Neo4j-Monitor/neo4jMonitor.py b/jenkins/jobs/Neo4j-Monitor/neo4jMonitor.py new file mode 100644 index 000000000..21ef71670 --- /dev/null +++ 
b/jenkins/jobs/Neo4j-Monitor/neo4jMonitor.py @@ -0,0 +1,30 @@ +import requests +from requests.exceptions import ConnectionError +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--servers', nargs='*') +parser.add_argument('--slackURL') + +slackHeaders = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'} +args = parser.parse_args() +slack_url = args.slackURL + +# POST function to send to Slack +def post_message_to_slack(message_text): + return requests.post(slack_url, data=message_text, headers=slackHeaders) + +for i in args.servers: + graphURL = 'http://' + i + '.nci.nih.gov:7474/graphql/' + + try: + request = requests.get(graphURL) + except ConnectionError: + print('The graph QL endpoint on ' + i + ' is NOT responding - please verify that this application is working') + slack_message = 'The graph QL endpoint on {} is NOT responding - please verify that this application is working'.format(i) + payload = '{"text":"' + slack_message + '"}' + post_message_to_slack(payload) + + else: + print('The graph QL endpoint on ' + i + ' is available') +~ diff --git a/jenkins/jobs/Neo4j-Monitor/neo4jMonitor.sh b/jenkins/jobs/Neo4j-Monitor/neo4jMonitor.sh new file mode 100644 index 000000000..502bd9124 --- /dev/null +++ b/jenkins/jobs/Neo4j-Monitor/neo4jMonitor.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +serverList=$1 +slack_url=$2 +dbCreds=$3 +slackHeaders="\"Content-type: application/json\"" +queryString="'{\"query\":\"{numberOfCases}\"}'" + +# POST function to send to Slack +post_message_to_slack(){ + curlCMD="curl --silent --max-time 10 --request POST --header $slackHeaders --data $payload $slack_url" + slackError=$(eval $curlCMD) + #echo "Slack Error: $slackError" + } + +for i in $serverList +do + graphURL="http://$dbCreds@$i.nci.nih.gov:7474/graphql/" + #error=$(curl --write-out %{http_code} --silent --output /dev/null $graphURL) + graphReq="curl --silent --max-time 10 --request POST --header $slackHeaders --data $queryString $graphURL" + 
error=$(eval $graphReq) + #echo $error + + #if [[ $error -eq '200' ]] || [[ $error -eq '401' ]] + if [[ $error == *"\"data\":{\"numberOfCases\":"* ]] + then + echo "The graph QL endpoint on $i is available" + else + echo "The graph QL endpoint on $i is NOT responding - please verify that this application is working" + slack_message="The graph QL endpoint on $i is NOT responding - please verify that this application is working" + payload="\"{\\\"text\\\":\\\"$slack_message\\\"}\"" + post_message_to_slack + fi +done diff --git a/jenkins/jobs/OpenPedCan-api/Jenkinsfile b/jenkins/jobs/OpenPedCan-api/Jenkinsfile new file mode 100644 index 000000000..1f20dedd2 --- /dev/null +++ b/jenkins/jobs/OpenPedCan-api/Jenkinsfile @@ -0,0 +1,207 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'OpenPedCan_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + 
selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/PediatricOpenTargets/OpenPedCan-api.git') + string( + defaultValue: 'OpenPedCan-api', + description: 'Project Name', + name: 'ProjectName') + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + // checkout([$class: 'GitSCM', + // branches: [[name: "${params.OpenPedCan_Tag}"]], + // doGenerateSubmoduleConfigurations: + // false, extensions: [], submoduleCfg: [], + // userRemoteConfigs: + // [[url: 'https://github.com/PediatricOpenTargets/OpenPedCan-api.git']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.OpenPedCan_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'OpenPedCan-api']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/PediatricOpenTargets/OpenPedCan-api.git']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + // stage('Set Environment'){ + // environment { + // SLACK_URL = "${SLACK_URL}" + // VERSION = "${params.Frontend_Tag}" + // } + // steps { + // script { + // switch("${params.Environment}") { + // case "dev": + // withCredentials([usernamePassword(credentialsId: 'ins_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER'), + // string(credentialsId: 'ins_dev_bearer', variable: 'BEARER')]) { + // env.TIER = "dev" + // env.NEO4J_USER = "${NEO4J_USER}" + // env.NEO4J_PASS = "${NEO4J_PASS}" + // env.BEARER = "${BEARER}" + + // } + // break + 
// default: + // withCredentials([usernamePassword(credentialsId: 'ins_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + // env.TIER = "dev" + // env.NEO4J_USER = "${NEO4J_USER}" + // env.NEO4J_PASS = "${NEO4J_PASS}" + // env.BEARER = "${BEARER}" + + // } + // break + // } + // } + // } + // } + + stage('build'){ + environment { + VERSION = "${params.OpenPedCan_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/OpenPedCan-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: 'OpenPedCan-api' + ], + colorized: true) + } + } + } + } + + stage('deploy'){ + environment { + VERSION = "${params.OpenPedCan_Tag}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/OpenPedCan-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: 'OpenPedCan-api' + ], + colorized: true) + } + } + } + } + + + + } + post { + + always { + + sendSlackMessage() + + } + + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/alb-ops/Jenkinsfile b/jenkins/jobs/alb-ops/Jenkinsfile new file mode 100644 index 000000000..e580617c4 --- /dev/null +++ 
b/jenkins/jobs/alb-ops/Jenkinsfile @@ -0,0 +1,166 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + extendedChoice( + name: 'Project', + defaultValue: 'dev', + description: 'Select Project', + type: 'PT_SINGLE_SELECT', + value: 'icdc,ctdc,bento' ) + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + extendedChoice( + name: 'Action', + defaultValue: 'create', + description: 'Select ALB Action', + type: 'PT_RADIO', + value: 'create,remove' ) + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 
'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + // stage('Set Environment'){ + // steps { + // script { + // switch("${params.Environment}") { + // case "sandbox": + // withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER') + // ]) { + // env.TIER = "sandbox" + + // } + // break + // case "dev": + // withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + // ]) { + // env.TIER = "dev" + + // } + // break + // case "qa": + // withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER') + // ]) { + // env.TIER = "qa" + + // } + // break + // case "stage": + // withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER') + // ]) { + // env.TIER = "stage" + // } + // break + // case "prod": + // withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER') + // ]) { + // env.TIER = "prod" + // } + // break + // default: + // withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + // ]) { + // env.TIER = "dev" + // } + // break + // } + // } + // } + // } + stage('build'){ + environment { + ACTION = "${params.Action}" + PROJECT = "${params.Project}" + TIER = "${params.Environment}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + // string(credentialsId: 'listener_arn', variable: 'LISTENER_ARN'), + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/alb-ops.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + action: "${params.Action}", + project: "${params.Project}" + ], + colorized: true) + } + } + } + } + } + post { + always { + sendSlackMessage() + // cleanWs() + } + } +} \ No newline at end of file diff --git 
a/jenkins/jobs/ansible-pipeline/Jenkinsfile b/jenkins/jobs/ansible-pipeline/Jenkinsfile new file mode 100644 index 000000000..3799e5eda --- /dev/null +++ b/jenkins/jobs/ansible-pipeline/Jenkinsfile @@ -0,0 +1,132 @@ +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-backend') + extendedChoice( + name: 'Environment', + defaultValue: 'test', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + dir('bento-frontend'){ + git branch: 'master', + url: 'https://github.com/CBIIT/bento-frontend' + } + } + + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + TEST_URL = "bento.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + PROD_URL = "caninecommons.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/bento-frontend/.env" + sh "cd ${WORKSPACE}/bento-frontend && sed -i 's,tag_version,${VERSION},g' .env" + + + } + break + case "test": + 
withCredentials([file(credentialsId: 'test_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/bento-frontend/.env" + sh "cd ${WORKSPACE}/bento-frontend && sed -i 's,tag_version,${VERSION},g' .env" + sh "cat ${WORKSPACE}/bento-frontend/.env" + env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + env.FENCE_URL = "bento.essential-dev.com" + } + break + default: + println "Select valid option" + break + } + } + } + } + stage('build'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy-frontend'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-frontend.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-backend.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/apm-integrations/Jenkinsfile b/jenkins/jobs/apm-integrations/Jenkinsfile new file mode 100644 index 000000000..5380fdaf5 --- 
/dev/null +++ b/jenkins/jobs/apm-integrations/Jenkinsfile @@ -0,0 +1,114 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + 
withCredentials([file(credentialsId: 'ansible_inventory', variable: 'hosts')]) + { + sh "cp ${hosts} ${WORKSPACE}/icdc-devops/ansible/inventory.yml" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/apm-integrations.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/inventory.yml', + credentialsId: 'commonsdocker', + extraVars: [tier: "${params.Environment}"], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/apm-integrations/Jenkinsfile_deploy b/jenkins/jobs/apm-integrations/Jenkinsfile_deploy new file mode 100644 index 000000000..b02c1a90d --- /dev/null +++ b/jenkins/jobs/apm-integrations/Jenkinsfile_deploy @@ -0,0 +1,299 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) +// booleanParam( +// 
defaultValue: false, +// description: 'Flush the Redis cache', +// name: 'Flush_Redis') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. 
+// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should math the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + + } + break + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + } + break + case "demo": + 
withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.BEARER = "${BEARER}" + env.TIER = "demo" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + } + } + } + } + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${TLS_HOSTNAME}" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'icdc', + frontend_version: "${params.Frontend_Tag}", + backend_version: "${params.Backend_Tag}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-ctdc-data-loader/Jenkinsfile b/jenkins/jobs/bento-ctdc-data-loader/Jenkinsfile new file mode 100644 index 000000000..880105085 --- /dev/null +++ b/jenkins/jobs/bento-ctdc-data-loader/Jenkinsfile @@ -0,0 +1,135 @@ +pipeline { + agent { + node { + label 
'data-loader' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Neo4j-v4', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev') + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + defaultValue: 'no', + name: 'WipeDB', + description: 'Choose yes to wipe DB', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'no', + name: 'CheatMode', + description: 'Bypass Data Validation', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'false', + name: 'SplitTransactions', + description: 'Choose true to the Split Transactions', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'false,true', + type: 'PT_SINGLE_SELECT') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + dir('ctdc-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/ctdc-model' + } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'ctdc_neo4j_user', passwordVariable: 
'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${NEO4J_PASS}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "ctdc" + env.SPLIT = "${params.SplitTransactions}" + } + break + default: + println "Select valid option" + break + } + } + } + } + stage('load-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-ctdc-data-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + stage('flush_redis'){ + environment { + TIER = "${params.Environment}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + cleanup { + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-ctdc/Jenkinsfile b/jenkins/jobs/bento-ctdc/Jenkinsfile new file mode 100644 index 000000000..85c357fc5 --- /dev/null +++ b/jenkins/jobs/bento-ctdc/Jenkinsfile @@ -0,0 +1,330 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: 
epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ctdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-ctdc-frontend']]]) + + 
checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_DEV_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_qa_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_QA_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "stage": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_stage_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_STAGE_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_prod_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_PROD_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 
'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_DEV_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + stage('build'){ + environment{ + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-cloudone-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + project: "ctdc" + ], + colorized: true + ) + } + } + } + } + + stage('deploy'){ + environment { + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + 
string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL') + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-cloudone-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'ctdc', + frontend_version: "${params.Frontend_Tag}-${BUILD_NUMBER}", + backend_version: "${params.Backend_Tag}-${BUILD_NUMBER}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}", + fence_id: "${FENCE_ID}", + enable_redis_filter: "${params.Enable_Redis_Filter}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + + // stage('clear redis cache'){ + // when { + // expression { params.Flush_Redis } + // } + + // environment { + // TIER = "${params.Environment}" + // } + + // steps{ + // wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + // script { + // sh label: 'database-hosts', script: '''#!/bin/bash + // echo "Creating inventory file" + // echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/redis_hosts + // echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/redis_hosts + + // ''' + + // } + + // ansiblePlaybook( + // playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + // inventory: '${WORKSPACE}/icdc-devops/ansible/redis_hosts', + // credentialsId: 'commonsdocker', + // colorized: true) + // } + // } + // } + + } + post { + always { + sendSlackMessage() + } + // success { + + // script { + // withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + // sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + // gitURL=$(git config remote.origin.url | sed 's|^.*//||') + // echo "Applying tag $Backend_Tag to URL: $gitURL" + // git config user.email "jenkins@bento-tools.org" + // git config user.name "Bento Jenkins" + // git tag --no-sign -a ${Backend_Tag}-icdc-${BUILD_NUMBER} -m "Jenkins 
tag: ${Backend_Tag}-${BUILD_NUMBER}" + // git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + // ''' + + // } + // } + + // script { + // withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + // sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + // cd ${WORKSPACE}/icdc-frontend + // gitURL=$(git config remote.origin.url | sed 's|^.*//||') + // echo "Applying tag $Frontend_Tag to URL: $gitURL" + // git config user.email "jenkins@bento-tools.org" + // git config user.name "Bento Jenkins" + // git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + // git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + // ''' + + // } + // } + + // script { + // if ("${params.Test_Params}"?.trim()) { + // echo 'Run Bento Katalon Tests' + // def params = "[${params.Test_Params}]" + // def list_params = evaluate(params) + // build quietPeriod: 300, wait: false, job: '_default/_lower/Test_Automation/Katalon_Prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + // } + // } + + // } + cleanup { + + cleanWs() + + } + } +} diff --git a/jenkins/jobs/bento-ctdc/Jenkinsfile_deploy b/jenkins/jobs/bento-ctdc/Jenkinsfile_deploy new file mode 100644 index 000000000..d186b7244 --- /dev/null +++ b/jenkins/jobs/bento-ctdc/Jenkinsfile_deploy @@ -0,0 +1,283 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: 
"Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${CTDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + CTDC_SLACK_URL = "${CTDC_SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([ + 
usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_DEV_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([ + usernamePassword(credentialsId: 'neo4j_ctdc_qa_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_QA_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "stage": + withCredentials([ + usernamePassword(credentialsId: 'neo4j_ctdc_stage_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_STAGE_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([ + usernamePassword(credentialsId: 'neo4j_ctdc_prod_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_PROD_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + + default: + withCredentials([ + usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_DEV_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + stage('copy static content') { + when { + expression { + params.Environment == "prod" || params.Environment == "stage" + } + + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'git_credential', passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + ]) { + + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/git-copy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true, + extraVars: [ project: "ctdc"] + ) + } + } + } + } + + stage('deploy'){ + environment { + CTDC_SLACK_URL = "${CTDC_SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-cloudone-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'ctdc', + frontend_version: "${params.Frontend_Tag}", + backend_version: "${params.Backend_Tag}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}", + fence_id: "${FENCE_ID}", + enable_redis_filter: "${params.Enable_Redis_Filter}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + + // stage('clear redis cache'){ + // when { + // expression { params.Flush_Redis } + // } + + // environment { + // TIER = "${params.Environment}" + // } + + // steps{ + // 
wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + // script { + // sh label: 'database-hosts', script: '''#!/bin/bash + // echo "Creating inventory file" + // echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/redis_hosts + // echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/redis_hosts + + // ''' + + // } + + // ansiblePlaybook( + // playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + // inventory: '${WORKSPACE}/icdc-devops/ansible/redis_hosts', + // credentialsId: 'commonsdocker', + // colorized: true) + // } + // } + // } + + } + post { + always { + + sendSlackMessage() + + } + + // success { + + // script { + // if ("${params.Test_Params}"?.trim()) { + // echo 'Run Bento Katalon Tests' + // def params = "[${params.Test_Params}]" + // def list_params = evaluate(params) + // build quietPeriod: 300, wait: false, job: '_default/_lower/Test_Automation/Katalon_Prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + // } + // } + + // } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-ctdc/Jenkinsfile_stop_site b/jenkins/jobs/bento-ctdc/Jenkinsfile_stop_site new file mode 100644 index 000000000..96c9a1c07 --- /dev/null +++ b/jenkins/jobs/bento-ctdc/Jenkinsfile_stop_site @@ -0,0 +1,112 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: 
${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('stop site'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${TLS_HOSTNAME}" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/stop-bento-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 
'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + project: 'ctdc' + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + + } + post { + always { +// sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-data-loader/Jenkinsfile b/jenkins/jobs/bento-data-loader/Jenkinsfile new file mode 100644 index 000000000..c20192764 --- /dev/null +++ b/jenkins/jobs/bento-data-loader/Jenkinsfile @@ -0,0 +1,318 @@ +pipeline { + agent { + node { + label 'data-loader' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,gdc,dev,qa,perf,icdc,prod,poc,gitlab' ) + extendedChoice( + name: 'ProjectName', + defaultValue: 'bento', + description: 'Choose the project', + type: 'PT_SINGLE_SELECT', + value: 'icdc,ctdc,bento,ccdi,gmb,ins,github-actions,c3dc' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + defaultValue: 'no', + name: 'WipeDB', + description: 'Choose yes to wipe DB', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'no', + name: 'CheatMode', + description: 'Bypass Data Validation', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'false', + name: 'SplitTransactions', + description: 'Choose true to the Split Transactions', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'false,true', + type: 'PT_SINGLE_SELECT') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 
'master', + name: 'ES_Backend_Branch', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-backend') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'ES_Frontend_Branch', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-frontend') + + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.ES_Frontend_Branch}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.ES_Backend_Branch}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-backend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + dir('bento-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/BENTO-TAILORx-model' + } + dir('ccdi-model'){ + git branch: 'main', + url: 'https://github.com/CBIIT/CCDI-Portal-ETL.git' + } + dir('gmb-model'){ + git branch: 'utf8', + url: 'https://github.com/CBIIT/gmb-model.git' + } + 
dir('c3dc-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/c3dc-model.git' + } + dir('ins-model'){ + git branch: 'main', + url: 'https://github.com/CBIIT/INS-ETL.git' + } + dir('ctdc-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/ctdc-model' + } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([usernamePassword(credentialsId: 'demo_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "dev": + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + // env.NEO4J_IP = "${${params.ProjectName}_${params.Environment}_neo4j_ip}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "poc": + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "gitlab": + withCredentials([usernamePassword(credentialsId: 
"${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "c3dc": + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "gdc": + withCredentials([usernamePassword(credentialsId: 'gdc_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + // env.NEO4J_IP = "${NEO4J_GDC_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + // env.NEO4J_IP = "${NEO4J_QA_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.TIER = "dev" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "perf": + withCredentials([usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 
'neo4j_password', usernameVariable: 'neo4j_user')]) { + // env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "icdc": + withCredentials([usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + // env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.SPLIT = "${params.SplitTransactions}" + } + break + default: + println "Select valid option" + break + } + } + } + } + stage('load-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-data-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + stage('flush_redis'){ + environment { + TIER = "${params.Environment}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + success { + + script { + if ("${params.ES_Backend_Branch}"?.trim()) { + echo 'Reload data to Elasticsearch' + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Backend_Branch', value: "${params.ES_Backend_Branch}"), gitParameter(name: 'Frontend_Branch', value: "${params.ES_Frontend_Branch}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + } + } + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-data-loader/Jenkinsfile_ES b/jenkins/jobs/bento-data-loader/Jenkinsfile_ES new file mode 100644 index 000000000..4f96266b6 --- /dev/null +++ b/jenkins/jobs/bento-data-loader/Jenkinsfile_ES @@ -0,0 +1,172 @@ +pipeline { + agent { + node { + label 'data-loader' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Dataloader_Branch', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Backend_Branch', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-backend') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Frontend_Branch', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-frontend') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 
'PT_SINGLE_SELECT', + value: 'dev,qa,perf,gitlab' ) + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Dataloader_Branch}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Branch}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Branch}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-backend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + sh 'git submodule update --init' + + checkout([$class: 'GitSCM', + branches: [[name: "master"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops']]]) + + dir('bento-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/BENTO-TAILORx-model'} + + } + + } + + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'bento_dev_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user'), + string(credentialsId: 'es_dev_host', variable: 'ES_HOST')]) { + env.ES_HOST = "https://${ES_HOST}" + env.PROJECT = "bento" + env.TIER = "${params.Environment}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.NEO4J_IP = 
"${NEO4J_DEV_IP}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'bento_qa_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user'), + string(credentialsId: 'es_qa_host', variable: 'ES_HOST')]) { + env.ES_HOST = "https://${ES_HOST}" + env.PROJECT = "bento" + env.TIER = "${params.Environment}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.NEO4J_IP = "${NEO4J_QA_IP}" + } + break + case "perf": + withCredentials([usernamePassword(credentialsId: 'bento_perf_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user'), + string(credentialsId: 'es_perf_host', variable: 'ES_HOST')]) { + env.ES_HOST = "https://${ES_HOST}" + env.PROJECT = "bento" + env.TIER = "${params.Environment}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.NEO4J_IP = "${NEO4J_PERF_IP}" + } + break + case "gitlab": + withCredentials([usernamePassword(credentialsId: 'bento_gitlab_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user'), + string(credentialsId: 'es_gitlab_host', variable: 'ES_HOST')]) { + env.ES_HOST = "https://${ES_HOST}" + env.PROJECT = "bento" + env.TIER = "${params.Environment}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.NEO4J_IP = "${NEO4J_PERF_IP}" + } + break + default: + println "Select valid option" + break + } + } + } + } + + stage('load-es-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + FRONTEND_BRANCH = "${params.frontend_branch}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-es-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + } + post { + always { + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-data-loader/Jenkinsfile_dumpfile b/jenkins/jobs/bento-data-loader/Jenkinsfile_dumpfile new file mode 100644 index 000000000..5995d54b1 --- /dev/null +++ 
b/jenkins/jobs/bento-data-loader/Jenkinsfile_dumpfile @@ -0,0 +1,215 @@ +pipeline { + agent { + node { + label 'data-loader' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,gdc,dev,qa,perf,prod,icdc,ctdc' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + string(defaultValue: "", + description: 'Name of the dump file to use', + name: 'DumpFileName') + string( + defaultValue: '', + description: 'The backend branch to use when loading data to Elasticsearch (if left blank this action will be skipped)', + name: 'ES_Backend_Branch') + + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + // dir('bento-model'){ + // git branch: 'master', + // url: 'https://github.com/CBIIT/BENTO-TAILORx-model' + // } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([usernamePassword(credentialsId: 'demo_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" 
+ env.DUMP_FILE = "${params.DumpFileName}" + + } + break + case "dev": + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + case "gdc": + withCredentials([usernamePassword(credentialsId: 'gdc_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_GDC_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + case "perf": + withCredentials([usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + case "icdc": + withCredentials([usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + case "ctdc": + withCredentials([usernamePassword(credentialsId: 'ctdc_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_CTDC_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + case 
"prod": + withCredentials([usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.DUMP_FILE = "${params.DumpFileName}" + } + break + default: + println "Select valid option" + break + } + } + } + } + + stage('get data'){ + steps{ + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-loader-icdc-get.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + + stash includes: "**/${DUMP_FILE}", name: 'dump_file' + + } + } + + stage('data-loader'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + + sh ''' + echo [neo4j] >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/hosts + ''' + + unstash 'dump_file' + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'server_ssh_key', + colorized: true) + } + + } + } + + stage('flush_redis'){ + environment { + TIER = "${params.Environment}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'server_ssh_key', + colorized: true) + } + } + } + + } + post { + + success { + + script { + if ("${params.ES_Backend_Branch}"?.trim()) { + echo 'Reload data to Elasticsearch' + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Backend_Branch', value: "${params.ES_Backend_Branch}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + 
} + } + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-demo/Jenkinsfile b/jenkins/jobs/bento-demo/Jenkinsfile new file mode 100644 index 000000000..a2714e67e --- /dev/null +++ b/jenkins/jobs/bento-demo/Jenkinsfile @@ -0,0 +1,147 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'cicd_microservice' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + 
sortMode: 'NONE', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'demo', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Set Environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([string(credentialsId: 'demo_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.BEARER = "${BEARER}" + env.TIER = "demo" + + } + break + default: + withCredentials([string(credentialsId: 'demo_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.BEARER = "${BEARER}" + env.TIER = "demo" + } + break + } + } + } + } + stage('Build') { + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Tag}" + DEMO_URL = "bento.essential-dev.com" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'demo_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'demo_neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/build-bento-demo.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-demo.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/bento-demo/jenkinsoldfile b/jenkins/jobs/bento-demo/jenkinsoldfile new file mode 100644 index 000000000..c92d023be --- /dev/null +++ b/jenkins/jobs/bento-demo/jenkinsoldfile @@ -0,0 +1,175 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "CTDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc-demo devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'icdc-demo-docker-maven' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // 
gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'demo', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + checkout([$class: 'GitSCM', branches: [[name: "${params.Tag}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + } + } + stage('Set Environment'){ + environment { + DEMO_URL = "caninecommons-demo.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([file(credentialsId: 'demo_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEMO_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEMO_IP}" + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + } + break + default: + withCredentials([file(credentialsId: 'demo_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 
's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEMO_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEMO_IP}" + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + sh "cd ${WORKSPACE}/src/main/resources/ && mv application_example.properties application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/localhost/${NEO4J_IP}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/Basic 123456/${BEARER}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/password=${NEO4J_IP}/password=${NEO4J_PASSWORD}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/neo4j_username/${NEO4J_USER}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accessid/${FENCE_ID}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accesskey/${FENCE_CREDENTIAL}/g' application.properties" + + sh "mvn package -DskipTests" + + sh "mv target/ICDC-0.0.1.war target/ROOT.war" + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o 
StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd +ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f icdc_demo +cd /local/content/docker +sudo docker pull cbiitssrepo/tomcat9 +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp ~/ROOT.war icdc_demo:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + } + } + } + } + post { + always { + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/bento-demo/sumo_migration_steps b/jenkins/jobs/bento-demo/sumo_migration_steps new file mode 100644 index 000000000..b2afe2992 --- /dev/null +++ b/jenkins/jobs/bento-demo/sumo_migration_steps @@ -0,0 +1,7 @@ +systemctl stop collector +modify user.properties +remove creds directory +systemctl start collector +suErEZJ2nz4NoD +twkLYfsrGWHrD2TLcmuSZBVnM0RvCzalDPoJKwSFtdJaZ1xQC3Lo3f6zOjaI74uO +https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Docker_Apps/Docker/01-Collect-Logs-and-Metrics-from-Docker#Step_4:_Add_a_Docker_stats_source
 \ No newline at end of file diff --git a/jenkins/jobs/bento-file-downloader/Jenkinsfile b/jenkins/jobs/bento-file-downloader/Jenkinsfile new file mode 100644 index 000000000..c6e238cf5 --- /dev/null +++ b/jenkins/jobs/bento-file-downloader/Jenkinsfile @@ -0,0 +1,191 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${FILES_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Project', + defaultValue: 'bento', + description: 'Choose project to build', + type: 'PT_SINGLE_SELECT', + value: 'bento,gmb,icdc,ctdc,c3dc' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Downloader_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-files') + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + 
checkout([$class: 'GitSCM', + branches: [[name: "${params.Downloader_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-files']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-files']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('build'){ + environment { + FILES_SLACK_URL = "${FILES_SLACK_URL}" + DOWNLOADER_VERSION = "${params.Downloader_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-file-downloader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + project: "${params.Project}" + ], + colorized: true) + } + } + } + } + + stage('deploy'){ + environment { + DOWNLOADER_VERSION = "${params.Downloader_Tag}-${BUILD_NUMBER}" + FILES_SLACK_URL = "${FILES_SLACK_URL}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 
'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-file-downloader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + project: "${params.Project}" + ], + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + + } + + success { + + script { + withCredentials([string(credentialsId: 'git_credential', variable: 'git_token')]) { + sh label: 'GIT-Tag Downloader', script: '''#!/bin/bash + cd ${WORKSPACE}/bento-files + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Downloader_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Downloader_Tag}-bento-${BUILD_NUMBER} -m "Jenkins tag: ${Downloader_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-file-downloader/Jenkinsfile_deploy b/jenkins/jobs/bento-file-downloader/Jenkinsfile_deploy new file mode 100644 index 000000000..965c58868 --- /dev/null +++ b/jenkins/jobs/bento-file-downloader/Jenkinsfile_deploy @@ -0,0 +1,104 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", 
"title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${FILES_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('deploy'){ + environment { + FILES_SLACK_URL = "${FILES_SLACK_URL}" + TIER = "${params.Environment}" + DOWNLOADER_VERSION = "${params.Downloader_Tag}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-file-downloader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + placeholder: 'yes', + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-gdc/Jenkinsfile b/jenkins/jobs/bento-gdc/Jenkinsfile new file mode 100644 index 000000000..5f2bf850d --- /dev/null +++ b/jenkins/jobs/bento-gdc/Jenkinsfile @@ -0,0 +1,298 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS 
= ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'gdc', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'gdc' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Tag +// +// Referenced parameters: +// Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { +// git branch: "${params.Backend_Tag}", +// url: 'https://github.com/CBIIT/bento-backend' + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-demo-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Tag}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-demo-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + case "gdc": + withCredentials([string(credentialsId: 'gdc_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_GDC_IP}" + env.BEARER = "${BEARER}" + env.TIER = "gdc" + + + + 
} + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + } + } + } + } + stage('build'){ + environment { + VERSION = "${params.Frontend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/gdc-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + environment { + REGION = "${params.Region}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/gdc-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-gdc/Jenkinsfile_dev b/jenkins/jobs/bento-gdc/Jenkinsfile_dev new file mode 100644 index 000000000..09b70840e --- /dev/null +++ b/jenkins/jobs/bento-gdc/Jenkinsfile_dev @@ -0,0 +1,274 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + 
long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Tag +// +// Referenced parameters: +// Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { +// git branch: "${params.Backend_Tag}", +// url: 'https://github.com/CBIIT/bento-backend' + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Tag}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + + } + break + case "stage": + 
withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + } + } + } + } + stage('build'){ + environment { + TAG = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy-dev.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-gke-data-loader/Jenkinsfile b/jenkins/jobs/bento-gke-data-loader/Jenkinsfile new file mode 100644 index 000000000..257f68724 --- /dev/null +++ b/jenkins/jobs/bento-gke-data-loader/Jenkinsfile @@ -0,0 +1,135 @@ +pipeline { + agent { + node { + label 'gke' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 
'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + defaultValue: 'no', + name: 'WipeDB', + description: 'Choose yes to wipe DB', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + dir('bento-model'){ + git branch: 'bento_core_data_model_tailorx_extension', + url: 'https://github.com/CBIIT/bento-model' + } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([usernamePassword(credentialsId: 'demo_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + } + break + case "dev": + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 
'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.TIER = "dev" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + } + break + case "stage": + withCredentials([usernamePassword(credentialsId: 'stage_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + } + break + default: + println "Select valid option" + break + } + } + } + } + stage('loader-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-data-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-gke/Jenkinsfile b/jenkins/jobs/bento-gke/Jenkinsfile new file mode 100644 index 000000000..012a0d683 --- /dev/null +++ b/jenkins/jobs/bento-gke/Jenkinsfile @@ -0,0 +1,211 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins 
Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'gke' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-frontend') +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. The branch defined here should math the default for Frontend_Tag +// +// Referenced parameters: +// Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 
'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + } + break + } + } + } + } + stage('build'){ + environment { + TAG = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-gke.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-gke.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} \ No newline at end of file diff --git 
a/jenkins/jobs/bento-icdc-alb-pipeline/Jenkinsfile b/jenkins/jobs/bento-icdc-alb-pipeline/Jenkinsfile new file mode 100644 index 000000000..3fe72cd51 --- /dev/null +++ b/jenkins/jobs/bento-icdc-alb-pipeline/Jenkinsfile @@ -0,0 +1,354 @@ +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + // extendedChoice( + // name: 'Release', + // defaultValue: 'test', + // description: 'Choose the environment to build', + // type: 'PT_SINGLE_SELECT', + // value: 'test,final' ) + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + + stages { + stage('checkout'){ + // agent { + // node { + // label 'slave-ncias-d2320-c' + // } + // } + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 
'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-icdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + } + break + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', 
variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + } + break + } + } + } + } + stage('build'){ + environment{ + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('set maintenance page'){ + agent { + node { + label 'slave-ncias-d2320-c' + } + } + steps { + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + // string(credentialsId: 'listener_arn', variable: 'LISTENER_ARN'), + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/alb-ops.yml', + 
inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + action: 'create', + project: 'icdc', + tier: "${params.Environment}" + ], + colorized: true) + } + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'icdc', + frontend_version: "${params.Frontend_Tag}", + backend_version: "${params.Backend_Tag}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + + stage('Approval Step'){ + steps{ + script { + env.RELEASE = input message: 'Your Action is needed', + parameters: [choice(name: 'Release?', choices: 'pass\nfail', description: 'Select "pass" if you deploy to production')] + } + } + } + stage('release'){ + when { + environment name:'RELEASE', value: 'pass' + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + 
string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/pass-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: 'pass', + frontend_version: "${params.Frontend_Tag}", + backend_version: "${params.Backend_Tag}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + stage('rollback'){ + when { + environment name:'RELEASE', value: 'fail' + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/fail-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: 'fail' + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + stage('disable maintenance page'){ + // when { + // environment name:'RELEASE', value: 'pass' + // } + agent { + node { + label 'slave-ncias-d2320-c' + } + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + // 
string(credentialsId: 'listener_arn', variable: 'LISTENER_ARN'), + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/alb-ops.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + action: 'remove', + project: 'icdc', + tier: "${params.Environment}" + ], + colorized: true) + } + } + } + } + } + post { + always { + sendSlackMessage() + cleanWs() + } + } +} diff --git a/jenkins/jobs/bento-icdc/Jenkinsfile b/jenkins/jobs/bento-icdc/Jenkinsfile new file mode 100644 index 000000000..84e475460 --- /dev/null +++ b/jenkins/jobs/bento-icdc/Jenkinsfile @@ -0,0 +1,356 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod') + extendedChoice( + name: 'DataModelBranch', + defaultValue: 'develop', + description: 'Choose Data Model branch to build', + type: 'PT_SINGLE_SELECT', + 
value: 'develop,master') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-icdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + 
ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_icdc_stage_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_icdc_prod_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = 
"${NEO4J_PASS}" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + stage('build'){ + environment{ + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${TLS_HOSTNAME}" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + data_model_branch: "${params.DataModelBranch}", + ], + colorized: true + ) + } + } + } + } + + stage('deploy'){ + environment { + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), 
+ string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'icdc', + frontend_version: "${params.Frontend_Tag}-${BUILD_NUMBER}", + backend_version: "${params.Backend_Tag}-${BUILD_NUMBER}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}", + enable_redis_filter: "${params.Enable_Redis_Filter}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + + stage('clear redis cache'){ + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/redis_hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/redis_hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/redis_hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + + } + post { + always { + sendSlackMessage() + } + success { + + script { + withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Backend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento 
Jenkins" + git tag --no-sign -a ${Backend_Tag}-icdc-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + script { + withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + cd ${WORKSPACE}/icdc-frontend + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Frontend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Bento Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: '_default/_lower/Test_Automation/Katalon_Prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + } + cleanup { + + cleanWs() + + } + } +} diff --git a/jenkins/jobs/bento-icdc/Jenkinsfile_deploy b/jenkins/jobs/bento-icdc/Jenkinsfile_deploy new file mode 100644 index 000000000..0942994b7 --- /dev/null +++ b/jenkins/jobs/bento-icdc/Jenkinsfile_deploy @@ -0,0 +1,400 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: 
"Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + extendedChoice( + name: 'DataModelBranch', + defaultValue: 'develop', + description: 'Choose Data Model branch to build', + type: 'PT_SINGLE_SELECT', + value: 'develop,master') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Image + // + // Referenced parameters: + // Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + 
env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "demo": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.BEARER = "${BEARER}" + env.TIER = "demo" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + stage('copy static content') { + when { + expression { + params.Environment == "prod" || params.Environment == "stage" + } + + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'git_credential_token',variable: 'git_token'), + ]) { + + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/git-copy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true, + extraVars: [ project: "icdc"] + ) + } + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'icdc', + frontend_version: "${params.Frontend_Tag}", + backend_version: "${params.Backend_Tag}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}", + enable_redis_filter: "${params.Enable_Redis_Filter}", + data_model_branch: "${params.DataModelBranch}", + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + + stage('clear redis cache'){ + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 
'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/redis_hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/redis_hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/redis_hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Bento Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: '_default/_lower/Test_Automation/Katalon_Prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-icdc/Jenkinsfile_dev b/jenkins/jobs/bento-icdc/Jenkinsfile_dev new file mode 100644 index 000000000..71f1a6b57 --- /dev/null +++ b/jenkins/jobs/bento-icdc/Jenkinsfile_dev @@ -0,0 +1,212 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", 
"title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-icdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + + } + break + case "dev": + withCredentials([string(credentialsId: 
'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + } + } + } + } + stage('build'){ + environment{ + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 
'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc-dev.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}"], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + } +// post { +// always { +// +// sendSlackMessage() +// } +// } +} \ No newline at end of file diff --git a/jenkins/jobs/bento-icdc/Jenkinsfile_stop_site b/jenkins/jobs/bento-icdc/Jenkinsfile_stop_site new file mode 100644 index 000000000..45d8bc6a9 --- /dev/null +++ b/jenkins/jobs/bento-icdc/Jenkinsfile_stop_site @@ -0,0 +1,112 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + 
} + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('stop site'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${TLS_HOSTNAME}" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/stop-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + project: 'icdc' + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + + } + post { + always { +// sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento/Jenkinsfile b/jenkins/jobs/bento/Jenkinsfile new file mode 100644 index 000000000..5b333297a --- /dev/null +++ b/jenkins/jobs/bento/Jenkinsfile @@ -0,0 +1,501 @@ + +import 
groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Elasticsearch filter', + name: 'Enable_ES_Filter') + booleanParam( + defaultValue: true, + description: 'Reload Elasticsearch data', + name: 'Reload_ES_Data') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 
'Test_Params') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. 
+// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Tag + // + // Referenced parameters: + // Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { +// git branch: "${params.Backend_Tag}", +// url: 'https://github.com/CBIIT/bento-backend' + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Tag}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'es_dev_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "qa": + 
withCredentials([string(credentialsId: 'es_qa_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "perf": + withCredentials([string(credentialsId: 'es_perf_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.TIER = "perf" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.SLACK_CHANNEL = "#system-alerts-prod" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('build'){ + environment { + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + redis_schema_file: "", + redis_init_queries_file: "", + test_queries_file: "${params.Test_Queries_File}", + ], + colorized: 
true) + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + SLACK_CHANNEL = "#system-alerts" + URL_DOMAIN = "bento-tools.org" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + enable_redis: "${params.Use_Redis}", + enable_es_filter: "${params.Enable_ES_Filter}", + ], + colorized: true) + } + + script { + + sh label: 'Env-Updates', script: '''#!/bin/bash + + yum -y install python3 + + cd icdc-devops/monitoring 
+ pip3 install -r requirements.txt + + ''' + + withCredentials([string(credentialsId: 'newrelic_api_key', variable: 'API_KEY')]) { + sh "python3 $WORKSPACE/icdc-devops/monitoring/releases/add_apm_release.py -p bento -t $TIER -v $FE_VERSION/$BE_VERSION -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/monitor_update.py -p bento -t $TIER -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/dashboards/add_tier_dashboards.py -p bento -t $TIER -k $API_KEY" + } + + } + + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + + always { + + sendSlackMessage() + + } + + success { + + script { + withCredentials([string(credentialsId: 'git_credential', variable: 'git_token')]) { + sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + gitURL=$(git config 
remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Backend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Backend_Tag}-bento-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + script { + withCredentials([string(credentialsId: 'git_credential', variable: 'git_token')]) { + sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + cd ${WORKSPACE}/bento-frontend + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Frontend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Bento Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: 'Test_Automation/Katalon_prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + script { + if (params.Reload_ES_Data) { + echo 'Reload data to Elasticsearch' + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Frontend_Branch', value: "${params.Frontend_Tag}"), gitParameter(name: 'Backend_Branch', value: "${params.Backend_Tag}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + } + } + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git 
a/jenkins/jobs/bento/Jenkinsfile_bento_image b/jenkins/jobs/bento/Jenkinsfile_bento_image new file mode 100644 index 000000000..d1b5ca8f7 --- /dev/null +++ b/jenkins/jobs/bento/Jenkinsfile_bento_image @@ -0,0 +1,98 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + + agent { + node { + label 'cicd_microservice' + } + } + + options { + ansiColor('xterm') + } + + stages{ + stage('checkout'){ + steps { + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + //branches: [[name: '*/master']], + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + } + } + + stage('build'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + 
ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-image-bento.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + dockerhub_path: "cbiitssrepo", + ], + extras: '-vvv', + colorized: true) + } + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento/Jenkinsfile_ctdc b/jenkins/jobs/bento/Jenkinsfile_ctdc new file mode 100644 index 000000000..01285c2ba --- /dev/null +++ b/jenkins/jobs/bento/Jenkinsfile_ctdc @@ -0,0 +1,436 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1' ) + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: false, + description: 
'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The commit id to checkout for the frontend', + name: 'fe_commit_id') + string( + defaultValue: '', + description: 'The commit id to checkout for the backend', + name: 'be_commit_id') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-ctdc-frontend') +// +// The Frontend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-ctdc-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should math the default for Frontend_Tag +// +// Referenced parameters: +// Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ctdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-ctdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'ctdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'ctdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CTDC_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + 
stage('build'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: 'ctdc', + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + frontend_commit_id: "${params.fe_commit_id}", + backend_commit_id: "${params.be_commit_id}", + ], + colorized: true) + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: 
'*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: 'ctdc', + enable_redis: "${params.Use_Redis}", + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'ctdc', + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + + always { + + sendSlackMessage() + + 
} + +// success { +// +// script { +// withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { +// sh label: 'GIT-Tag Backend', script: '''#!/bin/bash +// +// gitURL=$(git config remote.origin.url | sed 's|^.*//||') +// echo "Applying tag $Backend_Tag to URL: $gitURL" +// git config user.email "jenkins@bento-tools.org" +// git config user.name "Bento Jenkins" +// git tag --no-sign -a ${Backend_Tag}-bento-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" +// git push https://${git_user}:${git_password}@${gitURL} --tags +// +// ''' +// +// } +// } +// +// script { +// withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { +// sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash +// +// cd ${WORKSPACE}/bento-frontend +// gitURL=$(git config remote.origin.url | sed 's|^.*//||') +// echo "Applying tag $Frontend_Tag to URL: $gitURL" +// git config user.email "jenkins@bento-tools.org" +// git config user.name "Bento Jenkins" +// git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" +// git push https://${git_user}:${git_password}@${gitURL} --tags +// +// ''' +// +// } +// } +// +// } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento/Jenkinsfile_deploy b/jenkins/jobs/bento/Jenkinsfile_deploy new file mode 100644 index 000000000..2d7e9038d --- /dev/null +++ b/jenkins/jobs/bento/Jenkinsfile_deploy @@ -0,0 +1,458 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - 
${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Elasticsearch filter', + name: 'Enable_ES_Filter') + booleanParam( + defaultValue: true, + description: 'Reload Elasticsearch data', + name: 'Reload_ES_Data') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should math the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ +// stage('checkout'){ +// steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) +// +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) +// +// checkout( changelog:false, +// poll: false, +// scm: [$class: 'GitSCM', +// branches: [[name: '*/master']], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'DisableRemotePoll'], +// [$class: 'PathRestriction', excludedRegions: '*'], +// [$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'icdc-devops']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/icdc-devops.git']] +// ]) +// +// } +// +// } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${params.Frontend_Image} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'es_dev_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + 
env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'es_qa_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "perf": + withCredentials([string(credentialsId: 'es_perf_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.TIER = "perf" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + // UPDATE THIS TO USE CREDENTIALS WHEN PERF ES IS ENABLED + env.ES_HOST = "${ES_HOST}" + + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.SLACK_CHANNEL = "#system-alerts-prod" + + } + break + case "icdc": + withCredentials([usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.TIER = "icdc" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + 
expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + SLACK_CHANNEL = "#system-alerts" + URL_DOMAIN = "bento-tools.org" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + enable_redis: "${params.Use_Redis}", + enable_es_filter: "${params.Enable_ES_Filter}", + ], + colorized: true) + } + + script { + + sh label: 'Env-Updates', script: '''#!/bin/bash + + yum -y install python3 + + cd icdc-devops/monitoring + pip3 install -r requirements.txt + + ''' + + 
withCredentials([string(credentialsId: 'newrelic_api_key', variable: 'API_KEY')]) { + sh "python3 $WORKSPACE/icdc-devops/monitoring/releases/add_apm_release.py -p bento -t $TIER -v $FE_VERSION/$BE_VERSION -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/monitor_update.py -p bento -t $TIER -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/dashboards/add_tier_dashboards.py -p bento -t $TIER -k $API_KEY" + } + + } + + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Bento Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: 'Test_Automation/Katalon_prod', 
parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + script { + if (params.Reload_ES_Data) { + echo 'Reload data to Elasticsearch' + String be_tag = "${params.Backend_Image}" + String be_branch = "${be_tag}".substring(0, "${be_tag}".lastIndexOf("-")) + "-bento" + "${be_tag}".substring("${be_tag}".lastIndexOf("-"), "${be_tag}".length()) + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Frontend_Branch', value: "${params.Frontend_Image}"), gitParameter(name: 'Backend_Branch', value: "${be_branch}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento/Jenkinsfile_docs b/jenkins/jobs/bento/Jenkinsfile_docs new file mode 100644 index 000000000..3a932085c --- /dev/null +++ b/jenkins/jobs/bento/Jenkinsfile_docs @@ -0,0 +1,127 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + 
echo "${err} Slack notify failed" + } +} + + +pipeline { + + agent { + node { + label 'cicd_microservice' + } + } + + options { + ansiColor('xterm') + } + + stages{ + stage('checkout'){ + steps { + + checkout( changelog:true, + poll: true, + scm: [$class: 'GitSCM', + branches: [[name: '**']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'docrepo']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-docs.git']] + ]) + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + } + } + + stage('build'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-docs.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { + sh label: 'GIT Commit Doc Changes', script: '''#!/bin/bash + + cd ./bento-docs + gitCommit=$(git rev-parse HEAD) + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Committing documentation changes to URL: $gitURL" + echo "Commit triggered by change: $gitCommit" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git add docs + git commit -m "Documentation changes added by Jenkins: triggered by commit ${gitCommit}" + git push 
https://${git_user}:${git_password}@${gitURL} --all + + ''' + + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/bento/Jenkinsfile_icdc b/jenkins/jobs/bento/Jenkinsfile_icdc new file mode 100644 index 000000000..bdad8865e --- /dev/null +++ b/jenkins/jobs/bento/Jenkinsfile_icdc @@ -0,0 +1,433 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'icdc', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'icdc' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1' ) + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 
'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The commit id to checkout for the frontend', + name: 'fe_commit_id') + string( + defaultValue: '', + description: 'The commit id to checkout for the backend', + name: 'be_commit_id') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and 
is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Tag +// +// Referenced parameters: +// Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-icdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "icdc": + withCredentials([string(credentialsId: 'icdc_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.BEARER = "${BEARER}" + env.TIER = "icdc" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([string(credentialsId: 'icdc_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + 
env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.BEARER = "${BEARER}" + env.TIER = "icdc" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('build'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + frontend_commit_id: "${params.fe_commit_id}", + backend_commit_id: "${params.be_commit_id}", + ], + colorized: true) + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = 
"${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: 
'/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + + always { + + sendSlackMessage() + + } + +// success { +// +// script { +// withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { +// sh label: 'GIT-Tag Backend', script: '''#!/bin/bash +// +// gitURL=$(git config remote.origin.url | sed 's|^.*//||') +// echo "Applying tag $Backend_Tag to URL: $gitURL" +// git config user.email "jenkins@bento-tools.org" +// git config user.name "Bento Jenkins" +// git tag --no-sign -a ${Backend_Tag}-bento-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" +// git push https://${git_user}:${git_password}@${gitURL} --tags +// +// ''' +// +// } +// } +// +// script { +// withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { +// sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash +// +// cd ${WORKSPACE}/bento-frontend +// gitURL=$(git config remote.origin.url | sed 's|^.*//||') +// echo "Applying tag $Frontend_Tag to URL: $gitURL" +// git config user.email "jenkins@bento-tools.org" +// git config user.name "Bento Jenkins" +// git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" +// git push https://${git_user}:${git_password}@${gitURL} --tags +// +// ''' +// +// } +// } +// +// } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/c3dc/Jenkinsfile b/jenkins/jobs/c3dc/Jenkinsfile new file mode 100644 index 000000000..54757e93c --- /dev/null +++ b/jenkins/jobs/c3dc/Jenkinsfile @@ -0,0 +1,307 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + 
icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + string( + defaultValue: 'c3dc', + description: 'Project Name', + name: 'ProjectName') + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The commit id to checkout for the frontend', + name: 'fe_commit_id') + string( + defaultValue: '', + description: 'The commit id to checkout for the backend', + name: 'be_commit_id') + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + 
checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'c3dc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-c3dc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.TIER = "${params.Environment}" + env.NEO4J_IP = "${NEO4J_C3DC_IP}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.TIER = "${params.Environment}" + env.NEO4J_IP = "${NEO4J_C3DC_IP}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('build'){ + environment { + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', 
colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/c3dc-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: "${params.ProjectName}", + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + frontend_commit_id: "${params.fe_commit_id}", + backend_commit_id: "${params.be_commit_id}", + ], + colorized: true) + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], 
+ submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/c3dc-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: "${params.ProjectName}", + enable_redis: "${params.Use_Redis}", + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: "${params.ProjectName}", + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + + always { + + sendSlackMessage() + + } + + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/c3dc/Jenkinsfile_deploy b/jenkins/jobs/c3dc/Jenkinsfile_deploy new file mode 100644 index 
000000000..047ecb6aa --- /dev/null +++ b/jenkins/jobs/c3dc/Jenkinsfile_deploy @@ -0,0 +1,418 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1' ) + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Elasticsearch filter', + name: 'Enable_ES_Filter') + booleanParam( + defaultValue: true, + description: 'Reload Elasticsearch data', + name: 'Reload_ES_Data') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + +// gitParameter(branchFilter: 'origin/(.*)', +// 
defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. 
+// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ +// stage('checkout'){ +// steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) +// +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) +// +// checkout( changelog:false, +// poll: false, +// scm: [$class: 'GitSCM', +// branches: [[name: '*/master']], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'DisableRemotePoll'], +// [$class: 'PathRestriction', excludedRegions: '*'], +// [$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'icdc-devops']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/icdc-devops.git']] +// ]) +// +// } +// +// } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${params.Frontend_Image} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'es_dev_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + 
env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + PROJECT = "${params.ProjectName}", + TIER = "${params.Environment}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + SLACK_CHANNEL = "#system-alerts" + URL_DOMAIN = "bento-tools.org" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + 
[[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/c3dc-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: "${params.ProjectName}", + enable_redis: "${params.Use_Redis}", + enable_es_filter: "${params.Enable_ES_Filter}", + ], + colorized: true) + } + + script { + + sh label: 'Env-Updates', script: '''#!/bin/bash + + yum -y install python3 + + cd icdc-devops/monitoring + pip3 install -r requirements.txt + + ''' + + withCredentials([string(credentialsId: 'newrelic_api_key', variable: 'API_KEY')]) { + sh "python3 $WORKSPACE/icdc-devops/monitoring/releases/add_apm_release.py -p $PROJECT -t $TIER -v $FE_VERSION/$BE_VERSION -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/monitor_update.py -p $PROJECT -t $TIER -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/dashboards/add_tier_dashboards.py -p $PROJECT -t $TIER -k $API_KEY" + } + + } + + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + 
playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: "${params.ProjectName}", + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: 'Test_Automation/Katalon_prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + script { + if (params.Reload_ES_Data) { + echo 'Reload data to Elasticsearch' + String tag = "${params.Backend_Image}" + String branch = "${tag}".substring(0, "${tag}".lastIndexOf("-")) + "-bento" + "${tag}".substring("${tag}".lastIndexOf("-"), "${tag}".length()) + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Backend_Branch', value: "${branch}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/canine-testing-sandbox/Jenkinsfile b/jenkins/jobs/canine-testing-sandbox/Jenkinsfile new file mode 100644 index 000000000..40ab657e4 --- /dev/null +++ b/jenkins/jobs/canine-testing-sandbox/Jenkinsfile @@ -0,0 +1,121 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + coffee_image = ":hot-coffee:" + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 
'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${coffee_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "Selenium Automated Testing", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + //label 'cicd-selenium' + label 'selenium-testing' + //label 'slave-ncias-d2320-c' + //label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'qa', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage' ) + } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + git branch: 'master', + url: 'https://github.com/CBIIT/icdc_uitest', + credentialsId: 'vdonkor' + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ SANDBOX_URL}/g' config.properties" + break + case "dev": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ DEV_URL}/g' config.properties" + break + case "qa": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && cat config.properties" + break + case "stage": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed 
-i 's/caninecommons-qa.cancer.gov/${ STAGE_URL}/g' config.properties" + break + default: + println "Select valid option" + break + } + } + } + } + stage('Build') { + steps { + sh """ + mvn clean install + cd target/site/cucumber-reports + zip -r cucumber-html-reports.zip cucumber-html-reports + """ + } + } + stage ('Cucumber Reports'){ + steps { + cucumberSlackSend channel:'automated-testing', json:'target/cucumber.json' + } + } + + } + post { + always { + cucumber failedFeaturesNumber: -1, failedScenariosNumber: -1, failedStepsNumber: -1, fileIncludePattern: '**/*.json', jsonReportDirectory: 'target', pendingStepsNumber: -1, skippedStepsNumber: -1, sortingMethod: 'ALPHABETICAL', undefinedStepsNumber: -1 + emailext attachmentsPattern: 'target/site/cucumber-reports/cucumber-html-reports.zip', body: 'See attached results from recent integration Job run.', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: 'afag.ibrahimova@nih.gov;vincent.donkor@nih.gov' + //sendSlackMessage() + // println "${env.STEP_PASSED}" + //laxmi.lolla@nih.gov; amit.mukherjee@nih.gov; + } + } +} + + diff --git a/jenkins/jobs/canine-testing-sandbox/pipeline.groovy b/jenkins/jobs/canine-testing-sandbox/pipeline.groovy new file mode 100644 index 000000000..93250e14a --- /dev/null +++ b/jenkins/jobs/canine-testing-sandbox/pipeline.groovy @@ -0,0 +1,18 @@ +pipelineJob('icdc/canine-pipeline') { + + def repo = 'https://github.com/CBIIT/icdc-devops.git' + description("canine testing pipeline Job") + + definition { + cpsScm { + scm { + git { + remote { url(repo) } + branches('master', '**/feature*') + scriptPath('jenkins/jobs/canine-testing/Jenkinsfile') + extensions { } // required to avoid tagging + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/canine-testing/Jenkinsfile b/jenkins/jobs/canine-testing/Jenkinsfile new file mode 100644 index 000000000..b741a290d --- /dev/null +++ b/jenkins/jobs/canine-testing/Jenkinsfile @@ -0,0 +1,121 @@ + +import 
groovy.json.JsonOutput + +def sendSlackMessage() { + coffee_image = ":hot-coffee:" + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${coffee_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "Selenium Automated Testing", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + //label 'cicd-selenium' + //label 'selenium-testing' + //label 'slave-ncias-d2320-c' + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'qa', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage' ) + } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + git branch: 'master', + url: 'https://github.com/CBIIT/icdc_uitest', + credentialsId: 'vdonkor' + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ SANDBOX_URL}/g' config.properties" + break + case "dev": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 
's/caninecommons-qa.cancer.gov/${ DEV_URL}/g' config.properties" + break + case "qa": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && cat config.properties" + break + case "stage": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ STAGE_URL}/g' config.properties" + break + default: + println "Select valid option" + break + } + } + } + } + stage('Build') { + steps { + sh """ + mvn clean install + cd target/site/cucumber-reports + zip -r cucumber-html-reports.zip cucumber-html-reports + """ + } + } + stage ('Cucumber Reports'){ + steps { + cucumberSlackSend channel:'automated-testing', json:'target/cucumber.json' + } + } + + } + post { + always { + cucumber failedFeaturesNumber: -1, failedScenariosNumber: -1, failedStepsNumber: -1, fileIncludePattern: '**/*.json', jsonReportDirectory: 'target', pendingStepsNumber: -1, skippedStepsNumber: -1, sortingMethod: 'ALPHABETICAL', undefinedStepsNumber: -1 + emailext attachmentsPattern: 'target/site/cucumber-reports/cucumber-html-reports.zip', body: 'See attached results from recent integration Job run.', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: 'Matthew.beyers@nih.gov;vincent.donkor@nih.gov;barry.alkis@nih.gov;laxmi.lolla@nih.gov;amit.mukherjee@nih.gov' + //sendSlackMessage() + // println "${env.STEP_PASSED}" + //laxmi.lolla@nih.gov; amit.mukherjee@nih.gov; + } + } +} + + diff --git a/jenkins/jobs/canine-testing/pipeline.groovy b/jenkins/jobs/canine-testing/pipeline.groovy new file mode 100644 index 000000000..93250e14a --- /dev/null +++ b/jenkins/jobs/canine-testing/pipeline.groovy @@ -0,0 +1,18 @@ +pipelineJob('icdc/canine-pipeline') { + + def repo = 'https://github.com/CBIIT/icdc-devops.git' + description("canine testing pipeline Job") + + definition { + cpsScm { + scm { + git { + remote { url(repo) } + branches('master', '**/feature*') + scriptPath('jenkins/jobs/canine-testing/Jenkinsfile') + extensions { } // required to avoid tagging + } 
+ } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/ccdc/Jenkinsfile b/jenkins/jobs/ccdc/Jenkinsfile new file mode 100644 index 000000000..12143b684 --- /dev/null +++ b/jenkins/jobs/ccdc/Jenkinsfile @@ -0,0 +1,282 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice_ccdc' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod,demo' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Backend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: ' https://github.com/CBIIT/CCDC-WebService') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: ' https://github.com/CBIIT/CCDC-WebPortal') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + 
name: 'Etl_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: ' https://github.com/CBIIT/CCDC-ETL') + string(defaultValue: "etl-digest-files", + description: 'S3 Folder to load data from', + name: 'S3Folder') + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/CCDC-WebService']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'CCDC-WebPortal']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/CCDC-WebPortal']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Etl_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'CCDC-ETL']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/CCDC-ETL']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + ETL_VERSION = "${params.Etl_Tag}" + S3_FOLDER = "${params.S3Folder}" + } + steps { + 
script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + env.TIER = "dev" + s3_folder: "${params.S3Folder}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + env.TIER = "qa" + s3_folder: "${params.S3Folder}" + } + break + default: + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]) { + env.TIER = "dev" + s3_folder: "${params.S3Folder}" + } + break + } + } + } + } + + stage('build'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + ETL_VERSION = "${params.Etl_Tag}" + S3_FOLDER = "${params.S3Folder}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/ccdc-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + s3_folder: "${params.S3Folder}" + ], + colorized: true) + } + } + } + } + stage('deploy'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + ETL_VERSION = "${params.Etl_Tag}-${BUILD_NUMBER}" + S3_FOLDER = "${params.S3Folder}" + SLACK_CHANNEL = "#system-alerts" + URL_DOMAIN = "bento-tools.org" + } + steps{ + node('cicd_microservice_ccdc') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], 
+ [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/ccdc-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + s3_folder: "${params.S3Folder}" + ], + colorized: true) + } + + } + } + } + + } + post { + + always { + + sendSlackMessage() + + } + + // success { + + // // script { + // // withCredentials([string(credentialsId: 'git_credential', variable: 'git_token')]) { + // // sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + // // gitURL=$(git config remote.origin.url | sed 's|^.*//||') + // // echo "Applying tag $Backend_Tag to URL: $gitURL" + // // git config user.email "jenkins@bento-tools.org" + // // git config user.name "Bento Jenkins" + // // git tag --no-sign -a ${Backend_Tag}-bento-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" + // // git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + // // ''' + + // // } + // // } + + // script { + // withCredentials([string(credentialsId: 'git_credential', variable: 'git_token')]) { + // sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + // cd ${WORKSPACE}/CCDC-WebPortal + // gitURL=$(git config remote.origin.url | sed 's|^.*//||') + // echo "Applying tag $Frontend_Tag to URL: $gitURL" + // git config user.email "jenkins@bento-tools.org" + // git config user.name "Bento Jenkins" + // git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + // git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + // ''' + + // } + // } + + // } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/ccdc/Jenkinsfile_ETL 
b/jenkins/jobs/ccdc/Jenkinsfile_ETL new file mode 100644 index 000000000..a65c0e26f --- /dev/null +++ b/jenkins/jobs/ccdc/Jenkinsfile_ETL @@ -0,0 +1,178 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice_ccdc' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Etl_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: ' https://github.com/CBIIT/CCDC-ETL') + string(defaultValue: "etl-digest-files", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + name: 'Deployment_Type', + defaultValue: 're-deployment', + description: 'Choose the type of deployment', + type: 'PT_SINGLE_SELECT', + value: 'initial-setup,re-deployment' ) + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + 
checkout([$class: 'GitSCM', + branches: [[name: "${params.Etl_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'CCDC-ETL']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/CCDC-ETL']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + ETL_VERSION = "${params.Etl_Tag}" + S3_FOLDER = "${params.S3Folder}" + DEPLOYMENT_TYPE = "${params.Deployment_Type}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + env.TIER = "dev" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + env.TIER = "qa" + } + break + default: + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]) { + env.TIER = "dev" + } + break + } + } + } + } + + stage('build'){ + environment { + VERSION = "${params.Etl_Tag}" + ETL_VERSION = "${params.Etl_Tag}" + S3_FOLDER = "${params.S3Folder}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/ccdc-build-etl.yml', + 
inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + s3_folder: "${params.S3Folder}", + deployment_type: "${params.Deployment_Type}", + stack_name: "ccdc", + rds_port: "{{ lookup('aws_ssm', 'rds_port', region='us-east-1' ) }}", + rds_user: "{{ lookup('aws_ssm', 'rds_user', region='us-east-1' ) }}", + rds_password: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-rds-password', region='us-east-1' ) }}", + es_host: "{{ lookup('aws_ssm', '{{stack_name}}-{{tier}}-es-host', region='us-east-1' ) }}", + es_port: "{{ lookup('aws_ssm', 'es_port', region='us-east-1' ) }}", + digest_file_folder: "{{ lookup('env','S3_FOLDER') }}" + ], + colorized: true) + } + } + } + } + + } + post { + + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/ccdc/Jenkinsfile_deploy b/jenkins/jobs/ccdc/Jenkinsfile_deploy new file mode 100644 index 000000000..214771021 --- /dev/null +++ b/jenkins/jobs/ccdc/Jenkinsfile_deploy @@ -0,0 +1,472 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { 
+ extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Elasticsearch filter', + name: 'Enable_ES_Filter') + booleanParam( + defaultValue: true, + description: 'Reload Elasticsearch data', + name: 'Reload_ES_Data') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should math the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ +// stage('checkout'){ +// steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) +// +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) +// +// checkout( changelog:false, +// poll: false, +// scm: [$class: 'GitSCM', +// branches: [[name: '*/master']], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'DisableRemotePoll'], +// [$class: 'PathRestriction', excludedRegions: '*'], +// [$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'icdc-devops']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/icdc-devops.git']] +// ]) +// +// } +// +// } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Image}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + string(credentialsId: 'es_dev_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 
'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER'), + string(credentialsId: 'es_qa_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + case "perf": + withCredentials([string(credentialsId: 'perf_bearer', variable: 'BEARER'), + string(credentialsId: 'es_perf_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.BEARER = "${BEARER}" + env.TIER = "perf" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + // UPDATE THIS TO USE CREDENTIALS WHEN PERF ES IS ENABLED + env.ES_HOST = "${ES_HOST}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.SLACK_CHANNEL = "#system-alerts-prod" + + } + break + case "icdc": + withCredentials([string(credentialsId: 'icdc_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.BEARER = "${BEARER}" + env.TIER = "icdc" + 
env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Image}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + SLACK_CHANNEL = "#system-alerts" + URL_DOMAIN = "bento-tools.org" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: 
+ [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + enable_redis: "${params.Use_Redis}", + enable_es_filter: "${params.Enable_ES_Filter}", + ], + colorized: true) + } + + script { + + sh label: 'Env-Updates', script: '''#!/bin/bash + + yum -y install python3 + + cd icdc-devops/monitoring + pip3 install -r requirements.txt + + ''' + + withCredentials([string(credentialsId: 'newrelic_api_key', variable: 'API_KEY')]) { + sh "python3 $WORKSPACE/icdc-devops/monitoring/releases/add_apm_release.py -p bento -t $TIER -v $FE_VERSION/$BE_VERSION -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/monitor_update.py -p bento -t $TIER -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/dashboards/add_tier_dashboards.py -p bento -t $TIER -k $API_KEY" + } + + } + + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Bento Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: 'Test_Automation/Katalon_prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + script { + if (params.Reload_ES_Data) { + echo 'Reload data to Elasticsearch' + String tag = "${params.Backend_Image}" + String branch = "${tag}".substring(0, "${tag}".lastIndexOf("-")) + "-bento" + "${tag}".substring("${tag}".lastIndexOf("-"), "${tag}".length()) + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Backend_Branch', value: "${branch}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/ccdi/Jenkinsfile b/jenkins/jobs/ccdi/Jenkinsfile new file mode 100644 index 000000000..19f0f901f --- /dev/null +++ b/jenkins/jobs/ccdi/Jenkinsfile @@ -0,0 +1,421 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: 
jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Backend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/CCDI-Portal-Service') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/CCDI-Portal-WebPortal') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The 
file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + +// git branch: "${params.Backend_Tag}", +// url: 'https://github.com/CBIIT/bento-backend' + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + //extensions: [[$class: 'RelativeTargetDirectory', + extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/CCDI-Portal-Service']]]) + + + //checkout([$class: 'GitSCM', + // branches: [[name: "${params.Backend_Tag}"]], + // doGenerateSubmoduleConfigurations: + // false, extensions: [], submoduleCfg: [], + // userRemoteConfigs: + // [[url: 'https://github.com/CBIIT/CCDI-Portal-Service']]]) + //[[url: 'https://github.com/CBIIT/bento-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Tag}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'CCDI-Portal-WebPortal']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/CCDI-Portal-WebPortal']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = 
"${SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'ccdi_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'ccdi_qa_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "perf": + withCredentials([string(credentialsId: 'ccdi_stage_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_stage_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'ccdi_prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + + default: + withCredentials([string(credentialsId: 'ccdi_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + 
env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('build'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-ccdi.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + ], + colorized: true) + } + } + } + } + + // stage('clear redis cache'){ + // agent { label 'data-loader' } + // when { + // expression { params.Flush_Redis } + // } + + // environment { + // TIER = "${params.Environment}" + // } + + // steps{ + // checkout( changelog:false, + // poll: false, + // scm: [$class: 'GitSCM', + // branches: [[name: '*/master']], + // doGenerateSubmoduleConfigurations: false, + // extensions: [[$class: 'DisableRemotePoll'], + // [$class: 'PathRestriction', excludedRegions: '*'], + // [$class: 'RelativeTargetDirectory', + // relativeTargetDir: 'icdc-devops']], + // submoduleCfg: [], + // userRemoteConfigs: + // [[url: 'https://github.com/CBIIT/icdc-devops.git']] + // ]) + + // wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + // ansiblePlaybook( + // playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + // inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + // colorized: true) + // } + // } + // } + + stage('deploy'){ + + agent { + node { + label 'bastion-host' + } + } + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } 
+ + steps{ + /**node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: 'master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) **/ + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ccdi_ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/ansible/deploy-bento-ccdi.yml', + inventory: '${WORKSPACE}/ansible/hosts', + credentialsId: 'server_ssh_key', + extraVars: [ + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + + // stage('update database monitoring agents'){ + // steps{ + // node('data-loader') { + // checkout( changelog:false, + // poll: false, + // scm: [$class: 'GitSCM', + // branches: [[name: '*/master']], + // doGenerateSubmoduleConfigurations: false, + // extensions: [[$class: 'DisableRemotePoll'], + // [$class: 'PathRestriction', excludedRegions: '*'], + // [$class: 'RelativeTargetDirectory', + // relativeTargetDir: 'icdc-devops']], + // submoduleCfg: [], + // userRemoteConfigs: + // [[url: 'https://github.com/CBIIT/icdc-devops.git']] + // ]) + + // echo "Recreating inventory file" + // sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + // sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + // sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> 
${WORKSPACE}/icdc-devops/ansible/hosts" + // wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + // ansiblePlaybook( + // playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + // inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + // extraVars: [ + // env: "${params.Environment}", + // app_type: 'database', + // app_name: 'neo4j', + // project: 'bento', + // log_path: '/var/lib/neo4j/logs/*.log', + // ], + // colorized: true) + // } + // } + // } + // } + + } + post { + + always { + + sendSlackMessage() + + } + + success { + + script { + withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { + sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Backend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Backend_Tag}-bento-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" + git push https://${git_user}:${git_password}@${gitURL} --tags + + ''' + + } + } + + script { + withCredentials([usernamePassword(credentialsId: 'github_user', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { + sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + cd ${WORKSPACE}/bento-frontend + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Frontend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + git push https://${git_user}:${git_password}@${gitURL} --tags + + ''' + + } + } + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/ccdi/Jenkinsfile_deploy b/jenkins/jobs/ccdi/Jenkinsfile_deploy new file mode 100644 index 000000000..cf15629e5 --- 
/dev/null +++ b/jenkins/jobs/ccdi/Jenkinsfile_deploy @@ -0,0 +1,230 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Image}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + 
withCredentials([string(credentialsId: 'ccdi_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'ccdi_qa_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "perf": + withCredentials([string(credentialsId: 'ccdi_stage_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_stage_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'ccdi_prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + + default: + withCredentials([string(credentialsId: 'ccdi_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ccdi_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_CCDI_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('clear redis cache'){ + agent { 
label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Image}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-ccdi.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/cloudone-ctdc/Jenkinsfile 
b/jenkins/jobs/cloudone-ctdc/Jenkinsfile new file mode 100644 index 000000000..baf039f7d --- /dev/null +++ b/jenkins/jobs/cloudone-ctdc/Jenkinsfile @@ -0,0 +1,327 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + 
defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ctdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-ctdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_qa_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + 
env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "stage": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_stage_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_prod_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.TIER = "${params.Environment}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + stage('build'){ + environment{ + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-cloudone-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + 
test_queries_file: "${params.Test_Queries_File}", + project: "ctdc" + ], + colorized: true + ) + } + } + } + } + + stage('deploy'){ + environment { + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-cloudone-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'ctdc', + frontend_version: "${params.Frontend_Tag}-${BUILD_NUMBER}", + backend_version: "${params.Backend_Tag}-${BUILD_NUMBER}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}", + enable_redis_filter: "${params.Enable_Redis_Filter}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + + // stage('clear redis cache'){ + // when { + // expression { params.Flush_Redis } + // } + + // environment { + // TIER = "${params.Environment}" + // } + + // steps{ + // wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + // script { + // sh label: 'database-hosts', script: '''#!/bin/bash + // echo "Creating inventory file" + // echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/redis_hosts + // echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/redis_hosts + + // ''' + + // } + + // ansiblePlaybook( + // playbook: 
'${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + // inventory: '${WORKSPACE}/icdc-devops/ansible/redis_hosts', + // credentialsId: 'commonsdocker', + // colorized: true) + // } + // } + // } + + } + post { + always { + sendSlackMessage() + } + // success { + + // script { + // withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + // sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + // gitURL=$(git config remote.origin.url | sed 's|^.*//||') + // echo "Applying tag $Backend_Tag to URL: $gitURL" + // git config user.email "jenkins@bento-tools.org" + // git config user.name "Bento Jenkins" + // git tag --no-sign -a ${Backend_Tag}-icdc-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" + // git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + // ''' + + // } + // } + + // script { + // withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + // sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + // cd ${WORKSPACE}/icdc-frontend + // gitURL=$(git config remote.origin.url | sed 's|^.*//||') + // echo "Applying tag $Frontend_Tag to URL: $gitURL" + // git config user.email "jenkins@bento-tools.org" + // git config user.name "Bento Jenkins" + // git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + // git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + // ''' + + // } + // } + + // script { + // if ("${params.Test_Params}"?.trim()) { + // echo 'Run Bento Katalon Tests' + // def params = "[${params.Test_Params}]" + // def list_params = evaluate(params) + // build quietPeriod: 300, wait: false, job: '_default/_lower/Test_Automation/Katalon_Prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] 
+ // } + // } + + // } + cleanup { + + cleanWs() + + } + } +} diff --git a/jenkins/jobs/cloudone-ctdc/Jenkinsfile_deploy b/jenkins/jobs/cloudone-ctdc/Jenkinsfile_deploy new file mode 100644 index 000000000..569b536d7 --- /dev/null +++ b/jenkins/jobs/cloudone-ctdc/Jenkinsfile_deploy @@ -0,0 +1,393 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 
'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Image +// + // Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + 
env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "demo": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.BEARER = "${BEARER}" + env.TIER = "demo" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + stage('copy static content') { + when { + expression { + params.Environment == "prod" || params.Environment == "stage" + } + + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'git_credential', passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + ]) { + + 
ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/git-copy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true, + extraVars: [ project: "icdc"] + ) + } + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + release: "${params.Release}", + project: 'icdc', + frontend_version: "${params.Frontend_Tag}", + backend_version: "${params.Backend_Tag}", + backend_git_tag: "${params.Backend_Tag}", + frontend_git_tag: "${params.Frontend_Tag}", + enable_redis_filter: "${params.Enable_Redis_Filter}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + + stage('clear redis cache'){ + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: 
'''#!/bin/bash + echo "Creating inventory file" + echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/redis_hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/redis_hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/redis_hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Bento Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: '_default/_lower/Test_Automation/Katalon_Prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/cloudone-ctdc/Jenkinsfile_dev b/jenkins/jobs/cloudone-ctdc/Jenkinsfile_dev new file mode 100644 index 000000000..71f1a6b57 --- /dev/null +++ b/jenkins/jobs/cloudone-ctdc/Jenkinsfile_dev @@ -0,0 +1,212 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + 
) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-icdc-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([string(credentialsId: 'sandbox_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_SANDBOX_IP}" + env.BEARER = "${BEARER}" + env.TIER = "sandbox" + + } + break + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 
'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER') + ]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + } + } + } + } + stage('build'){ + environment{ + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + 
string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-icdc-dev.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}"], + become: true, + hostKeyChecking: false, + colorized: true) + } + + + } + } + } + } +// post { +// always { +// +// sendSlackMessage() +// } +// } +} \ No newline at end of file diff --git a/jenkins/jobs/cloudone-ctdc/Jenkinsfile_stop_site b/jenkins/jobs/cloudone-ctdc/Jenkinsfile_stop_site new file mode 100644 index 000000000..45d8bc6a9 --- /dev/null +++ b/jenkins/jobs/cloudone-ctdc/Jenkinsfile_stop_site @@ -0,0 +1,112 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + // label 'docker-maven' + } + } + 
parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('stop site'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME') + ]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${TLS_HOSTNAME}" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/stop-bento-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + tier: "${params.Environment}", + project: 'icdc' + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + + } + post { + always { +// sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/cloudone-ecs/Jenkinsfile b/jenkins/jobs/cloudone-ecs/Jenkinsfile new file mode 100644 index 000000000..4621d24bf --- /dev/null +++ b/jenkins/jobs/cloudone-ecs/Jenkinsfile @@ -0,0 +1,154 
@@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'NONE', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'ecs-dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'ecs-dev' ) + } + // triggers { + // 
pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Set Environment'){ + steps { + script { + switch("${params.Environment}") { + case "ecs-dev": + withCredentials([string(credentialsId: 'ecs_dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_ECS_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "ecs-dev" + + } + break + default: + withCredentials([string(credentialsId: 'ecs_dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_ECS_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "ecs-dev" + } + break + } + } + } + } + stage('Build') { + environment { + VERSION = "${params.Tag}" + ECS_DEV_URL = "caninecommons-ecs-dev.nci.nih.gov" + SLACK_URL = "${SLACK_ECS_ICDC_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'ecs_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_ecs_dev_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'ecr_repo', variable: 'ECR'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-cloudone-ecs.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', 
colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'dev_alb_target_arn', variable: 'ALB_TARGET_ARN'), + string(credentialsId: 'dev_service_role_arn', variable: 'SERVICE_ROLE_ARN') + ]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-cloudone-ecs.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + + } + } + } + } + post { + + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/cloudone/Jenkinsfile b/jenkins/jobs/cloudone/Jenkinsfile new file mode 100644 index 000000000..3057d2925 --- /dev/null +++ b/jenkins/jobs/cloudone/Jenkinsfile @@ -0,0 +1,224 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'docker-maven' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 
'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'NONE', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + checkout([$class: 'GitSCM', branches: [[name: "${params.Tag}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + PROD_URL = "caninecommons.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + } + break + case "dev": + withCredentials([file(credentialsId: 'dev_env_file', variable: 
'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + case "qa": + withCredentials([file(credentialsId: 'qa_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${QA_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_QA_IP}" + env.TOMCAT02_IP = "${TOMCAT02_QA_IP}" + env.NEO4J_IP = "${NEO4J_QA_IP}" + } + break + case "stage": + withCredentials([file(credentialsId: 'stage_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${STAGE_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_STAGE_IP}" + env.TOMCAT02_IP = "${TOMCAT02_STAGE_IP}" + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + } + break + case "prod": + withCredentials([file(credentialsId: 'prod_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${PROD_URL}/,g' application_example.properties" + env.TOMCAT01_IP = 
"${TOMCAT01_PROD_IP}" + env.TOMCAT02_IP = "${TOMCAT02_PROD_IP}" + env.NEO4J_IP = "${NEO4J_PROD_IP}" + } + break + default: + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + sh '''export NODE_OPTIONS=--max-old-space-size=8000''' + sh '''export MAVEN_OPTS="-Xmx256m -XX:MaxPermSize=512m"''' + sh "cd ${WORKSPACE}/src/main/resources/ && mv application_example.properties application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/localhost/${NEO4J_IP}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/Basic 123456/${BEARER}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/password=${NEO4J_IP}/password=${NEO4J_PASSWORD}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/neo4j_username/${NEO4J_USER}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accessid/${FENCE_ID}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accesskey/${FENCE_CREDENTIAL}/g' application.properties" + + sh "mvn package 
-DskipTests" + + sh "mv target/ICDC-0.0.1.war target/ROOT.war" + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT02_IP $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd +ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f k9dc +cd /local/content/docker +sudo docker pull cbiitssrepo/bento-backend:release +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp ~/ROOT.war k9dc:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} diff --git a/jobs/icdc/pipeline.groovy b/jenkins/jobs/cloudone/pipeline.groovy similarity index 100% rename from jobs/icdc/pipeline.groovy rename to jenkins/jobs/cloudone/pipeline.groovy diff --git a/jenkins/jobs/ctdc-agents/Jenkinsfile b/jenkins/jobs/ctdc-agents/Jenkinsfile new file mode 100644 index 000000000..83ceae0b3 --- /dev/null +++ b/jenkins/jobs/ctdc-agents/Jenkinsfile @@ -0,0 +1,243 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + 
icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + + parameters { + + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa' ) + +// The UI_Instances parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// if (env.equals("dev")) +// { +// return ["nciws-d1092-c,nciws-d2001-c"] +// } +// else if (env.equals("qa")) +// { +// return ["nciws-q2024-c,nciws-q2025-c"] +// } +// else +// { +// return ["You must select a valid environment"] +// } +// +// Fallback Script: +// None +// +// Choice Type: +// Check Boxes +// +// Referenced parameters: +// Environment + +// The DB_Instances parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// if (env.equals("dev")) +// { +// return ["ncias-d2224-c"] +// } +// else if (env.equals("qa")) +// { +// return ["ncias-q2251-c"] +// } +// else +// { +// return ["You must select a valid environment"] +// } +// +// Fallback Script: +// None +// +// Choice Type: +// Check Boxes +// +// Referenced parameters: +// Environment + + } + + options { + ansiColor('xterm') + } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('DB Agents'){ + when { + expression { params.DB_Instances } + } + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + withCredentials([string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LICENSE_KEY') + ]) + { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[agent_setup]" > ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${DB_Instances} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${DB_Instances} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_Instances} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${DB_Instances} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/agent-setup-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + env: "${params.Environment}", + app_type: 'db', + project: 'ctdc', + log_path: '/var/log/neo4j', + sumo_access_id: "${SUMO_ACCESS_ID}", + sumo_access_key: "${SUMO_ACCESS_KEY}", + newrelic_license_key: "${NEWRELIC_LICENSE_KEY}" + ], + colorized: true) + + } + } + } + } + + stage('UI Agents'){ + when { + expression { params.UI_Instances } + } + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + withCredentials([string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LICENSE_KEY') + ]) + { + + script { + sh label: 'ui-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[agent_setup]" > ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${UI_Instances} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${UI_Instances} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${UI_Instances} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${UI_Instances} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + env: "${params.Environment}", + app_type: 'app', + project: 'ctdc', + log_path: '/local/content/k9dc', + sumo_access_id: "${SUMO_ACCESS_ID}", + sumo_access_key: "${SUMO_ACCESS_KEY}", + newrelic_license_key: "${NEWRELIC_LICENSE_KEY}" + ], + colorized: true) + } + + } + } + } + + } + post { + always { + sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git 
a/jenkins/jobs/ctdc-data-loader/Jenkinsfile b/jenkins/jobs/ctdc-data-loader/Jenkinsfile new file mode 100644 index 000000000..87b5be2e1 --- /dev/null +++ b/jenkins/jobs/ctdc-data-loader/Jenkinsfile @@ -0,0 +1,171 @@ +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + defaultValue: 'no', + name: 'WipeDB', + description: 'Choose yes to wipe DB', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'no', + name: 'CheatMode', + description: 'Bypass Data Validation', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'false', + name: 'SplitTransactions', + description: 'Choose true to the Split Transactions', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'false,true', + type: 'PT_SINGLE_SELECT') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + 
dir('ctdc-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/ctdc-model' + } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([string(credentialsId: 'neo4j_ctdc_dev_cred', variable: 'neo4j_password')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "ctdc" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "dev": + withCredentials([string(credentialsId: 'neo4j_ctdc_dev_cred', variable: 'neo4j_password')]) { + //env.NEO4J_IP = "${NEO4J_CTDC_IP}" + env.NEO4J_IP = "ncias-d2267-c" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "ctdc" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "qa": + withCredentials([string(credentialsId: 'neo4j_ctdc_qa_cred', variable: 'neo4j_password')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "ctdc" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "stage": + withCredentials([string(credentialsId: 'neo4j_ctdc_stage_cred', variable: 'neo4j_password')]) { + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "ctdc" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "prod": + withCredentials([string(credentialsId: 'neo4j_ctdc_prod_cred', variable: 
'neo4j_password')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "ctdc" + env.SPLIT = "${params.SplitTransactions}" + } + break + default: + println "Select valid option" + break + } + } + } + } + stage('loader-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/ctdc-data-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/ctdc-data-validator/Jenkinsfile b/jenkins/jobs/ctdc-data-validator/Jenkinsfile new file mode 100644 index 000000000..a6ba36b51 --- /dev/null +++ b/jenkins/jobs/ctdc-data-validator/Jenkinsfile @@ -0,0 +1,85 @@ +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc_helper_scripts') + string(defaultValue: "EAY131-XXX", + description: 'Enter Arm IDs Here (comma separated):', + name: 'ArmID') + string(defaultValue: "", + description: 'Enter The S3 Bucket Where The Manifest File Is Located:', + name: 'S3Bucket') + string(defaultValue: "", + description: 'Enter Manifest File Name Here (include the full path from the base of the S3 bucket):', + name: 'ManifestFile') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + 
doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc_helper_scripts']]]) + sh 'git submodule update --init' + sh 'git lfs pull' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('set-environment'){ + steps { + script { + withCredentials([string(credentialsId: 'okta_key_prod', variable: 'oktaSecret')]){ + env.MATCH_BASE_URL = "https://api.match.nci.nih.gov/api/v1" + env.USE_PROD = "true" + env.OKTA_KEY = "${oktaSecret}" + env.SECRET_NAME = "Okta-Match-Prod-Secrets" + env.META_DATA_PATH = "PROD" + env.META_DATA_BUCKET = "nci-cbiit-ctdc-prod" + env.CIPHER_KEY = "3" + } + } + } + } + stage('validate-metadata'){ + environment { + SLACK_URL = "${SLACK_URL}" + ARM_ID = "${params.ArmID}" + MANIFEST_FILE = "${params.ManifestFile}" + S3_BUCKET = "${params.S3Bucket}" + PHS_ID = "" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/ctdc-file-validator.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + } \ No newline at end of file diff --git a/jenkins/jobs/ctdc-file-loader/Jenkinsfile b/jenkins/jobs/ctdc-file-loader/Jenkinsfile new file mode 100644 index 000000000..0cb9e16f4 --- /dev/null +++ b/jenkins/jobs/ctdc-file-loader/Jenkinsfile @@ -0,0 +1,118 @@ +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/ctdc-data-processing') + string(defaultValue: "EAY131-XXX", + description: 'Enter ArmID Here:', + name: 'ArmID') + string(defaultValue: "phs00xxxx", + description: 'Enter 
PhsID Here:', + name: 'PhsID') + string(defaultValue: "", + description: 'S3 Bucket to load file from', + name: 'S3Bucket') + extendedChoice( + defaultValue: 'uat', + name: 'MatchEnv', + description: 'Choose environment to file to', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'uat,prod', + type: 'PT_RADIO') + extendedChoice( + defaultValue: '3', + name: 'CipheyKey', + description: 'Choose Cipher Key to Use', + quoteValue: false, + multiSelectDelimiter: ',', + value: '0,1,2,3,4,5,6,7,8,9', + type: 'PT_SINGLE_SELECT') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/ctdc-data-processing']]]) + sh 'git submodule update --init' + sh 'git lfs pull' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('set-environment'){ + steps { + script { + switch("${params.MatchEnv}") { + case "uat": + withCredentials([string(credentialsId: 'okta_key_uat', variable: 'oktaSecret')]){ + env.MATCH_BASE_URL = "https://api.match-uat.nci.nih.gov/api/v1" + env.USE_PROD = "false" + env.OKTA_KEY = "${oktaSecret}" + env.SECRET_NAME = "Okta-Match-UAT-Secrets" + env.META_DATA_PATH = "UAT" + env.META_DATA_BUCKET = "nci-cbiit-ctdc-dev" + env.CIPHER_KEY = "${params.CipheyKey}" + } + break + case "prod": + withCredentials([string(credentialsId: 'okta_key_prod', variable: 'oktaSecret')]){ + env.MATCH_BASE_URL = "https://api.match.nci.nih.gov/api/v1" + env.USE_PROD = "true" + env.OKTA_KEY = "${oktaSecret}" + env.SECRET_NAME = "Okta-Match-Prod-Secrets" + env.META_DATA_PATH = "PROD" + env.META_DATA_BUCKET = "nci-cbiit-ctdc-prod" + env.CIPHER_KEY = "${params.CipheyKey}" + } + break + default: + println "Select 
valid MatchEnv" + break + } + } + } + } + stage('loader-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + S3_BUCKET = "${params.S3Bucket}" + ARM_ID = "${params.ArmID}" + PHS_ID = "${params.PhsID}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/ctdc-file-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/ctdc-pipeline/Jenkinsfile b/jenkins/jobs/ctdc-pipeline/Jenkinsfile new file mode 100644 index 000000000..d4e0edab0 --- /dev/null +++ b/jenkins/jobs/ctdc-pipeline/Jenkinsfile @@ -0,0 +1,228 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "CTDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "ctdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${CTDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'ctdc-docker-maven' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // 
gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/ctdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/ctdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "trialcommons-dev.cancer.gov" + QA_URL = "trialcommons-qa.cancer.gov" + STAGE_URL = "trialcommons-stage.cancer.gov" + PROD_URL = "trialcommons.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + 
env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + } + break + case "dev": + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + case "qa": + withCredentials([file(credentialsId: 'qa_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${QA_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_QA_IP}" + env.TOMCAT02_IP = "${TOMCAT02_QA_IP}" + env.NEO4J_IP = "${NEO4J_QA_IP}" + } + break + case "stage": + withCredentials([file(credentialsId: 'stage_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${STAGE_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_STAGE_IP}" + env.TOMCAT02_IP = "${TOMCAT02_STAGE_IP}" + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + } + break + case "prod": + withCredentials([file(credentialsId: 'prod_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd 
${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${PROD_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_PROD_IP}" + env.TOMCAT02_IP = "${TOMCAT02_PROD_IP}" + env.NEO4J_IP = "${NEO4J_PROD_IP}" + } + break + default: + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + CTDC_SLACK_URL = "${CTDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-ctdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT02_IP $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd 
+ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f ctdc +cd /local/content/docker +sudo docker pull cbiitssrepo/tomcat9 +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp ~/ROOT.war ctdc:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/ctdc.graphql" + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/ctdc/Jenkinsfile b/jenkins/jobs/ctdc/Jenkinsfile new file mode 100644 index 000000000..bab29c80f --- /dev/null +++ b/jenkins/jobs/ctdc/Jenkinsfile @@ -0,0 +1,222 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "CTDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "ctdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${CTDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'ctdc-docker-maven' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', 
defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/ctdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + checkout([$class: 'GitSCM', branches: [[name: "${params.Tag}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/CBIIT/ctdc-codebase']]]) + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "trialcommons-dev.cancer.gov" + QA_URL = "trialcommons-qa.cancer.gov" + STAGE_URL = "trialcommons-stage.cancer.gov" + PROD_URL = "trialcommons.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 
's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + } + break + case "dev": + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + case "qa": + withCredentials([file(credentialsId: 'qa_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${QA_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_QA_IP}" + env.TOMCAT02_IP = "${TOMCAT02_QA_IP}" + env.NEO4J_IP = "${NEO4J_QA_IP}" + } + break + case "stage": + withCredentials([file(credentialsId: 'stage_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${STAGE_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_STAGE_IP}" + env.TOMCAT02_IP = "${TOMCAT02_STAGE_IP}" + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + } + break + case "prod": + withCredentials([file(credentialsId: 'prod_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} 
${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${PROD_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_PROD_IP}" + env.TOMCAT02_IP = "${TOMCAT02_PROD_IP}" + env.NEO4J_IP = "${NEO4J_PROD_IP}" + } + break + default: + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + CTDC_SLACK_URL = "${CTDC_SLACK_URL}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + sh "cd ${WORKSPACE}/src/main/resources/ && mv application_example.properties application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/localhost/${NEO4J_IP}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/Basic 123456/${BEARER}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/password=${NEO4J_IP}/password=${NEO4J_PASSWORD}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/neo4j_username/${NEO4J_USER}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed 
-i 's/accessid/${FENCE_ID}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accesskey/${FENCE_CREDENTIAL}/g' application.properties" + + sh "mvn package -DskipTests" + + sh "mv target/CTDC-0.0.1.war target/ROOT.war" + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT02_IP $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd +ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f ctdc +cd /local/content/docker +sudo docker pull cbiitssrepo/tomcat9 +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp ~/ROOT.war ctdc:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/ctdc.graphql" + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/data-loader/Jenkinsfile b/jenkins/jobs/data-loader/Jenkinsfile new file mode 100644 index 000000000..a0dc5c1f1 --- /dev/null +++ b/jenkins/jobs/data-loader/Jenkinsfile @@ -0,0 +1,116 @@ +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 
'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + defaultValue: 'no', + name: 'WipeDB', + description: 'Choose yes to wipe DB', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + break + case "dev": + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + break + case "qa": + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + break + case "stage": + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + break + case "prod": + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.WIPE_DB = "${params.WipeDB}" + break + default: + println "Select valid option" + break + } + } + } + } + stage('loader-data'){ + 
environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/data-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/dev-canine-testing/Jenkinsfile b/jenkins/jobs/dev-canine-testing/Jenkinsfile new file mode 100644 index 000000000..3888acc51 --- /dev/null +++ b/jenkins/jobs/dev-canine-testing/Jenkinsfile @@ -0,0 +1,124 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + coffee_image = ":hot-coffee:" + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${coffee_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "Selenium Automated Testing", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'selenium-testing' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'qa', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage' ) + } + options { + timestamps() + } + tools { + maven 'maven-3.6.1' + jdk 'jdk11' + } + stages { + // stage('Fireup Selenium') { + 
// steps { + // //sh "sudo docker rm -vf selenium" + // sh 'sudo docker run -d -p 4444:4444 --shm-size=2g --name selenium selenium/standalone-chrome' + // } + // } + stage('Checkout') { + steps { + git branch: 'master', + url: 'https://github.com/CBIIT/icdc_uitest', + credentialsId: 'vdonkor' + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ SANDBOX_URL}/g' config.properties" + break + case "dev": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ DEV_URL}/g' config.properties" + break + case "qa": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && cat config.properties" + break + case "stage": + sh "cd ${WORKSPACE}/src/test/resources/test_data/ && sed -i 's/caninecommons-qa.cancer.gov/${ STAGE_URL}/g' config.properties" + break + default: + println "Select valid option" + break + } + } + } + } + stage('Build') { + steps { + wrap([$class: 'Xvfb']) { + sh """ + mvn clean install + """ + + sh """ + cd target/site/cucumber-reports + zip -r cucumber-html-reports.zip cucumber-html-reports + """ + } + } + } + + } + post { + always { + cucumber failedFeaturesNumber: -1, failedScenariosNumber: -1, failedStepsNumber: -1, fileIncludePattern: '**/*.json', jsonReportDirectory: 'target', pendingStepsNumber: -1, skippedStepsNumber: -1, sortingMethod: 'ALPHABETICAL', undefinedStepsNumber: -1 + //emailext attachmentsPattern: 'target/site/cucumber-reports/cucumber-html-reports.zip', body: 'See attached results from recent integration Job run.', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: 'afag.ibrahimova@nih.gov;laxmi.lolla@nih.gov; 
amit.mukherjee@nih.gov;vincent.donkor@nih.gov' + //sendSlackMessage() + // println "${env.STEP_PASSED}" + //sh "sudo docker rm -vf ${selenium}" + } + } +} + + \ No newline at end of file diff --git a/jenkins/jobs/dev-canine-testing/pipeline.groovy b/jenkins/jobs/dev-canine-testing/pipeline.groovy new file mode 100644 index 000000000..93250e14a --- /dev/null +++ b/jenkins/jobs/dev-canine-testing/pipeline.groovy @@ -0,0 +1,18 @@ +pipelineJob('icdc/canine-pipeline') { + + def repo = 'https://github.com/CBIIT/icdc-devops.git' + description("canine testing pipeline Job") + + definition { + cpsScm { + scm { + git { + remote { url(repo) } + branches('master', '**/feature*') + scriptPath('jenkins/jobs/canine-testing/Jenkinsfile') + extensions { } // required to avoid tagging + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/file-monitor/Jenkinsfile b/jenkins/jobs/file-monitor/Jenkinsfile new file mode 100644 index 000000000..f48b8925a --- /dev/null +++ b/jenkins/jobs/file-monitor/Jenkinsfile @@ -0,0 +1,60 @@ + +pipeline { + agent { + node { + label 'docker-maven' + } + } + + options { + ansiColor('xterm') + } + + stages{ + stage('checkout'){ + steps { + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('check status'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'files-slack-token', variable: 'SLACK_TOKEN')]) + { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/file-monitor.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + slack_token: "${SLACK_TOKEN}" 
+ ], + colorized: true + ) + } + } + } + } + } + post { + + cleanup { + + cleanWs() + + } + + } +} diff --git a/jenkins/jobs/git-copy/Jenkinsfile b/jenkins/jobs/git-copy/Jenkinsfile new file mode 100644 index 000000000..ef0546465 --- /dev/null +++ b/jenkins/jobs/git-copy/Jenkinsfile @@ -0,0 +1,118 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 
'DEFAULT', + sortMode: 'NONE', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'CopyFrom', + defaultValue: 'dev', + description: 'Choose the environment to copy from', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage' ) + extendedChoice( + name: 'CopyTo', + defaultValue: 'qa', + description: 'Choose the environment to copy to', + type: 'PT_SINGLE_SELECT', + value: 'qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Build') { + environment { + COPY_FROM = "${params.CopyFrom}" + COPY_TO = "${params.CopyTo}" + SLACK_URL = "${SLACK_ICDC_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'git_credential', passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), + ]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/git-copy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/gmb/Jenkinsfile b/jenkins/jobs/gmb/Jenkinsfile new file mode 100644 index 000000000..d17ce586e --- /dev/null +++ b/jenkins/jobs/gmb/Jenkinsfile @@ -0,0 +1,307 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 
'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + string( + defaultValue: 'gmb', + description: 'Project Name', + name: 'ProjectName') + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The commit id to checkout for the frontend', + name: 'fe_commit_id') + string( + defaultValue: '', + description: 'The commit id to checkout for the backend', + name: 'be_commit_id') + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } 
+ stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'gmb-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-gmb-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.TIER = "${params.Environment}" + env.NEO4J_IP = "${NEO4J_GMB_IP}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: "${params.ProjectName}_${params.Environment}_neo4j_user", passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.TIER = "${params.Environment}" + env.NEO4J_IP = "${NEO4J_GMB_IP}" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('build'){ + environment { + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + 
wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/gmb-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: "${params.ProjectName}", + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + frontend_commit_id: "${params.fe_commit_id}", + backend_commit_id: "${params.be_commit_id}", + ], + colorized: true) + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', 
+ relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/gmb-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: "${params.ProjectName}", + enable_redis: "${params.Use_Redis}", + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: "${params.ProjectName}", + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + + always { + + sendSlackMessage() + + } + + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/gmb/Jenkinsfile_deploy b/jenkins/jobs/gmb/Jenkinsfile_deploy 
new file mode 100644 index 000000000..80c035fef --- /dev/null +++ b/jenkins/jobs/gmb/Jenkinsfile_deploy @@ -0,0 +1,418 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1' ) + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Elasticsearch filter', + name: 'Enable_ES_Filter') + booleanParam( + defaultValue: true, + description: 'Reload Elasticsearch data', + name: 'Reload_ES_Data') + string( + defaultValue: '', + description: 'The set of parameters to use for test automation - if left blank no tests will be run', + name: 'Test_Params') + +// gitParameter(branchFilter: 
'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. 
+// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should math the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ +// stage('checkout'){ +// steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) +// +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } +// +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) +// +// checkout( changelog:false, +// poll: false, +// scm: [$class: 'GitSCM', +// branches: [[name: '*/master']], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'DisableRemotePoll'], +// [$class: 'PathRestriction', excludedRegions: '*'], +// [$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'icdc-devops']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/icdc-devops.git']] +// ]) +// +// } +// +// } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${params.Frontend_Image} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'es_dev_host', variable: 'ES_HOST'), + usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + 
env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.ES_HOST = "${ES_HOST}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + PROJECT = "${params.ProjectName}", + TIER = "${params.Environment}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + SLACK_CHANNEL = "#system-alerts" + URL_DOMAIN = "bento-tools.org" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + 
[[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/gmb-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: "${params.ProjectName}", + enable_redis: "${params.Use_Redis}", + enable_es_filter: "${params.Enable_ES_Filter}", + ], + colorized: true) + } + + script { + + sh label: 'Env-Updates', script: '''#!/bin/bash + + yum -y install python3 + + cd icdc-devops/monitoring + pip3 install -r requirements.txt + + ''' + + withCredentials([string(credentialsId: 'newrelic_api_key', variable: 'API_KEY')]) { + sh "python3 $WORKSPACE/icdc-devops/monitoring/releases/add_apm_release.py -p $PROJECT -t $TIER -v $FE_VERSION/$BE_VERSION -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/monitor_update.py -p $PROJECT -t $TIER -k $API_KEY" + sh "python3 $WORKSPACE/icdc-devops/monitoring/dashboards/add_tier_dashboards.py -p $PROJECT -t $TIER -k $API_KEY" + } + + } + + } + } + } + + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + 
playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: "${params.ProjectName}", + log_path: '/var/log/neo4j/*.log', + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + success { + + script { + if ("${params.Test_Params}"?.trim()) { + echo 'Run Katalon Tests' + def params = "[${params.Test_Params}]" + def list_params = evaluate(params) + build quietPeriod: 300, wait: false, job: 'Test_Automation/Katalon_prod', parameters: [gitParameter(name: 'Tag', value: list_params['Tag']), string(name: 'KatalonSuite', value: list_params['KatalonSuite']), extendedChoice(name: 'Profile', value: list_params['Profile']), string(name: 'EmailRecipients', value: list_params['Email'])] + } + } + + script { + if (params.Reload_ES_Data) { + echo 'Reload data to Elasticsearch' + String tag = "${params.Backend_Image}" + String branch = "${tag}".substring(0, "${tag}".lastIndexOf("-")) + "-bento" + "${tag}".substring("${tag}".lastIndexOf("-"), "${tag}".length()) + build job: 'Bento/_Data_Processing/BentoDataLoader_ES', parameters: [gitParameter(name: 'Dataloader_Branch', value: 'master'), gitParameter(name: 'Backend_Branch', value: "${branch}"), extendedChoice(name: 'Environment', value: "${params.Environment}")] + } + } + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc-agents/Jenkinsfile b/jenkins/jobs/icdc-agents/Jenkinsfile new file mode 100644 index 000000000..75f2670e9 --- /dev/null +++ b/jenkins/jobs/icdc-agents/Jenkinsfile @@ -0,0 +1,243 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = 
JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + + parameters { + + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + +// The UI_Instances parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// if (env.equals("dev")) +// { +// return ["nciws-d1092-c,nciws-d2001-c"] +// } +// else if (env.equals("qa")) +// { +// return ["nciws-q2024-c,nciws-q2025-c"] +// } +// else +// { +// return ["You must select a valid environment"] +// } +// +// Fallback Script: +// None +// +// Choice Type: +// Check Boxes +// +// Referenced parameters: +// Environment + +// The DB_Instances parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// if (env.equals("dev")) +// { +// return ["ncias-d2224-c"] +// } +// else if (env.equals("qa")) +// { +// return ["ncias-q2251-c"] +// } +// else +// { +// return ["You must select a valid environment"] +// } +// +// Fallback Script: +// None +// +// Choice Type: +// Check Boxes +// +// Referenced parameters: +// Environment + + } + + options { + ansiColor('xterm') + } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('DB Agents'){ + when { + expression { params.DB_Instances } + } + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + withCredentials([string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LICENSE_KEY') + ]) + { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[agent_setup]" > ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${DB_Instances} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${DB_Instances} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_Instances} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${DB_Instances} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/agent-setup-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + env: "${params.Environment}", + app_type: 'db', + project: 'icdc', + log_path: '/var/log/neo4j', + sumo_access_id: "${SUMO_ACCESS_ID}", + sumo_access_key: "${SUMO_ACCESS_KEY}", + newrelic_license_key: "${NEWRELIC_LICENSE_KEY}" + ], + colorized: true) + + } + } + } + } + + stage('UI Agents'){ + when { + expression { params.UI_Instances } + } + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + withCredentials([string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LICENSE_KEY') + ]) + { + + script { + sh label: 'ui-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[agent_setup]" > ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${UI_Instances} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${UI_Instances} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${UI_Instances} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${UI_Instances} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + extraVars: [ + env: "${params.Environment}", + app_type: 'app', + project: 'icdc', + log_path: '/local/content/k9dc', + sumo_access_id: "${SUMO_ACCESS_ID}", + sumo_access_key: "${SUMO_ACCESS_KEY}", + newrelic_license_key: "${NEWRELIC_LICENSE_KEY}" + ], + colorized: true) + } + + } + } + } + + } + post { + always { + sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git 
a/jenkins/jobs/icdc-data-dictionary/Jenkinsfile b/jenkins/jobs/icdc-data-dictionary/Jenkinsfile new file mode 100644 index 000000000..e89d06c93 --- /dev/null +++ b/jenkins/jobs/icdc-data-dictionary/Jenkinsfile @@ -0,0 +1,179 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Dictionary_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/visualiser-standalone') + + } + + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Dictionary_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 
'visualiser-standalone']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/visualiser-standalone']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('build'){ + environment { + DICTIONARY_VERSION = "${params.Dictionary_Tag}-${BUILD_NUMBER}" + TIER = "${params.Environment}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([ + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME')]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-icdc-data-dictionary.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}" + ], + colorized: true) + } + } + } + } + + stage('deploy'){ + environment { + DICTIONARY_VERSION = "${params.Dictionary_Tag}-${BUILD_NUMBER}" + SLACK_URL = "${ICDC_SLACK_URL}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([ + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER') + ]) + { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc-data-dictionary.yml', + inventory: 
'${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}" + ], + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + + } + + success { + + script { + withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + sh label: 'GIT-Tag Dictionary', script: '''#!/bin/bash + cd ${WORKSPACE}/visualiser-standalone + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Dictionary_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Dictionary_Tag}-icdc-${BUILD_NUMBER} -m "Jenkins tag: ${Dictionary_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc-data-dictionary/Jenkinsfile_deploy b/jenkins/jobs/icdc-data-dictionary/Jenkinsfile_deploy new file mode 100644 index 000000000..86e24adef --- /dev/null +++ b/jenkins/jobs/icdc-data-dictionary/Jenkinsfile_deploy @@ -0,0 +1,107 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + 
+pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod') + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('deploy'){ + environment { + SLACK_URL = "${ICDC_SLACK_URL}" + DICTIONARY_VERSION = "${params.Dictionary_Tag}" + } + steps{ + node('commons-docker-ncias-p2236-v') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + file(credentialsId: 'ansible_host_file', variable: 'host_file')]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc-data-dictionary.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}" + ], + colorized: true) + } + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc-data-loader/Jenkinsfile b/jenkins/jobs/icdc-data-loader/Jenkinsfile new file mode 100644 index 000000000..040853ccb --- /dev/null +++ b/jenkins/jobs/icdc-data-loader/Jenkinsfile @@ -0,0 +1,255 @@ +pipeline { 
+ agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-dataloader') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,ecs-dev,dev,qa,stage,prod' ) + extendedChoice( + name: 'ProjectName', + defaultValue: 'icdc', + description: 'Choose the project', + type: 'PT_SINGLE_SELECT', + value: 'icdc,ctdc,bento' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + extendedChoice( + defaultValue: 'no', + name: 'WipeDB', + description: 'Choose yes to wipe DB', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'no', + name: 'CheatMode', + description: 'Bypass Data Validation', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'no,yes', + type: 'PT_RADIO') + extendedChoice( + defaultValue: 'false', + name: 'SplitTransactions', + description: 'Choose true to the Split Transactions', + quoteValue: false, + multiSelectDelimiter: ',', + value: 'false,true', + type: 'PT_SINGLE_SELECT') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + recursiveSubmodules: true, + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-dataloader']]]) + sh 'git submodule update --init' + dir('icdc-devops'){ + git branch: 'master', + url: 
'https://github.com/CBIIT/icdc-devops.git' + } + dir('icdc-model'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-model-tool' + } + } + + } + stage('set-environment'){ + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([usernamePassword(credentialsId: 'neo4j_icdc_dev_cred', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.CHEAT_MODE = "${params.CheatMode}" + env.PROJECT = "${params.ProjectName}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "dev": + withCredentials([usernamePassword(credentialsId: 'neo4j_icdc_dev_cred', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.PROJECT = "${params.ProjectName}" + env.CHEAT_MODE = "${params.CheatMode}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "ecs-dev": + withCredentials([usernamePassword(credentialsId: 'neo4j_icdc_dev_cred', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_ECS_DEV_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.PROJECT = "${params.ProjectName}" + env.CHEAT_MODE = "${params.CheatMode}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'neo4j_icdc_qa_cred', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.TIER = 
"dev" + env.WIPE_DB = "${params.WipeDB}" + env.PROJECT = "${params.ProjectName}" + env.CHEAT_MODE = "${params.CheatMode}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "stage": + withCredentials([usernamePassword(credentialsId: 'neo4j_icdc_stage_cred', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.PROJECT = "${params.ProjectName}" + env.CHEAT_MODE = "${params.CheatMode}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.SPLIT = "${params.SplitTransactions}" + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'neo4j_icdc_prod_cred', passwordVariable: 'neo4j_password', usernameVariable: 'neo4j_user')]) { + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.S3_FOLDER = "${params.S3Folder}" + env.TIER = "${params.Environment}" + env.WIPE_DB = "${params.WipeDB}" + env.PROJECT = "${params.ProjectName}" + env.CHEAT_MODE = "${params.CheatMode}" + env.NEO4J_PASSWORD = "${neo4j_password}" + env.SPLIT = "${params.SplitTransactions}" + } + break + default: + println "Select valid option" + break + } + } + } + } + stage('loader-data'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/icdc-data-loader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + neo4j_ip: "${NEO4J_IP}", + ], + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + + stage('clear redis cache'){ + agent { label 'commons-docker-ncias-p2236-v' } + when { + allOf { + + not { expression { params.Environment == 'stage' } } + not { expression { params.Environment == 'prod' } } + expression { params.Flush_Redis } + + } + + } + + environment { + TIER = "${params.Environment}" + } + 
+ steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + + } + post { + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc-data-loader/Jenkinsfile_data_dump b/jenkins/jobs/icdc-data-loader/Jenkinsfile_data_dump new file mode 100644 index 000000000..d7ac068d5 --- /dev/null +++ b/jenkins/jobs/icdc-data-loader/Jenkinsfile_data_dump @@ -0,0 +1,160 @@ +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X 
POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa' ) + string(defaultValue: "", + description: 'Name of the dump file to use', + name: 'DumpFileName') + + } + + // options { + // ansiColor('xterm') + // } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + + stage('checkout'){ + steps { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + } + + stage('set-environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + break + case "qa": + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + break + default: + println "Select valid option" + break + } + } + } + } + + stage('dump data'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'db-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc-neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/icdc-data-dump.yml', + inventory: 
'${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + + } + } + + stage('push to s3'){ + steps{ + + script { + sh label: 'db-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[loader]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo "localhost" >> ${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/icdc-data-dump-push.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + + } + } + + } + post { + always { + sendSlackMessage() + } + cleanup { + cleanWs() + } + } +} diff --git a/jenkins/jobs/icdc-data-loader/Jenkinsfile_data_dump_upper b/jenkins/jobs/icdc-data-loader/Jenkinsfile_data_dump_upper new file mode 100644 index 000000000..5033e76cb --- /dev/null +++ b/jenkins/jobs/icdc-data-loader/Jenkinsfile_data_dump_upper @@ -0,0 +1,160 @@ +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to 
build', + type: 'PT_SINGLE_SELECT', + value: 'stage,prod' ) + string(defaultValue: "", + description: 'Name of the dump file to use', + name: 'DumpFileName') + + } + + // options { + // ansiColor('xterm') + // } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + + stage('checkout'){ + steps { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + } + + stage('set-environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "stage": + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + break + case "prod": + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + break + default: + println "Select valid option" + break + } + } + } + } + + stage('dump data'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'db-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc-neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/icdc-data-dump.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + + } + } + + stage('push to s3'){ + steps{ + + script { + sh label: 'db-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[loader]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo "localhost" >> 
${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/icdc-data-dump-push.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + + } + } + + } + post { + always { + sendSlackMessage() + } + cleanup { + cleanWs() + } + } +} diff --git a/jenkins/jobs/icdc-data-loader/Jenkinsfile_dumpfile b/jenkins/jobs/icdc-data-loader/Jenkinsfile_dumpfile new file mode 100644 index 000000000..e83a1e6a2 --- /dev/null +++ b/jenkins/jobs/icdc-data-loader/Jenkinsfile_dumpfile @@ -0,0 +1,217 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + string(defaultValue: "", + description: 'Name of the dump file to use', + name: 'DumpFileName') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 
'Flush_Redis') + + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + } + + stage('set-environment'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + env.NEO4J_IP = "${NEO4J_ICDC_DEV_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + env.DB_HOSTS = "ncias-d2224-c" + env.FE_HOSTS = "nciws-d1092-c,nciws-d2001-c" + break + case "qa": + env.NEO4J_IP = "${NEO4J_ICDC_QA_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + env.DB_HOSTS = "ncias-q2251-c" + env.FE_HOSTS = "nciws-q2024-c,nciws-q2025-c" + break + default: + println "Select valid option" + break + } + } + } + } + + stage('get data'){ + steps{ + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-loader-icdc-get.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + + stash includes: "**/${DUMP_FILE}", name: 'dump_file' + + } + } + + stage('load data'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'db-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc-neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${DB_HOSTS} | cut -d ',' -f2 ) -ge 0 ]];then + echo 
${DB_HOSTS} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_HOSTS} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${DB_HOSTS} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + script { + sh label: 'ui-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc-hosts]" >> ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${FE_HOSTS} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${FE_HOSTS} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${FE_HOSTS} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${FE_HOSTS} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + unstash 'dump_file' + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-loader-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + + } + } + + stage('clear redis cache'){ + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + } + post { + always { + sendSlackMessage() + } + cleanup { + cleanWs() + } + } +} diff --git a/jenkins/jobs/icdc-data-loader/Jenkinsfile_dumpfile_upper b/jenkins/jobs/icdc-data-loader/Jenkinsfile_dumpfile_upper new file mode 100644 index 000000000..2864483b2 --- /dev/null +++ b/jenkins/jobs/icdc-data-loader/Jenkinsfile_dumpfile_upper @@ -0,0 +1,213 @@ + +import groovy.json.JsonOutput + +def 
sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'stage,prod' ) + string(defaultValue: "", + description: 'S3 Folder to load data from', + name: 'S3Folder') + string(defaultValue: "", + description: 'Name of the dump file to use', + name: 'DumpFileName') + booleanParam( + defaultValue: true, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + + } + // options { + // ansiColor('xterm') + // } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + } + + stage('set-environment'){ + environment { + 
ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "stage": + env.NEO4J_IP = "${NEO4J_ICDC_STAGE_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + break + case "prod": + env.NEO4J_IP = "${NEO4J_ICDC_PROD_IP}" + env.S3_BUCKET = "${params.S3Folder}" + env.DUMP_FILE = "${params.DumpFileName}" + env.TIER = "${params.Environment}" + break + default: + println "Select valid option" + break + } + } + } + } + + stage('get data'){ + steps{ + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-loader-icdc-get.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + + stash includes: "**/${DUMP_FILE}", name: 'dump_file' + + } + } + + stage('load data'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'db-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc-neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${DB_HOSTS} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${DB_HOSTS} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_HOSTS} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${DB_HOSTS} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + script { + sh label: 'ui-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc-hosts]" >> ${WORKSPACE}/icdc-devops/ansible/hosts + + if [[ $(wc -l < ${FE_HOSTS} | cut -d ',' -f2 ) -ge 0 ]];then + echo ${FE_HOSTS} | cut -d ',' -f1 >> ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${FE_HOSTS} | cut -d ',' -f2 >> ${WORKSPACE}/icdc-devops/ansible/hosts + else + echo ${FE_HOSTS} >> ${WORKSPACE}/icdc-devops/ansible/hosts + fi + + ''' + + } + + unstash 'dump_file' + + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/neo4j-loader-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + + } + } + + stage('clear redis cache'){ + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[icdc]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${NEO4J_IP} >> ${WORKSPACE}/icdc-devops/ansible/hosts + + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis_icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + } + } + } + } + post { + always { + sendSlackMessage() + } + cleanup { + cleanWs() + } + } +} diff --git a/jenkins/jobs/icdc-demo/Jenkinsfile b/jenkins/jobs/icdc-demo/Jenkinsfile new file mode 100644 index 000000000..d5643dd95 --- /dev/null +++ b/jenkins/jobs/icdc-demo/Jenkinsfile @@ -0,0 +1,182 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } 
catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'icdc-demo-docker-maven' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'NONE', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Set Environment'){ + environment { + DEMO_URL = "caninecommons-demo.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([file(credentialsId: 'demo_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd 
${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEMO_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEMO_IP}" + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + } + break + default: + withCredentials([file(credentialsId: 'demo_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEMO_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEMO_IP}" + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd +ssh -i 
$deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f icdc_demo +cd /local/content/docker +sudo docker pull cbiitssrepo/tomcat9 +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp ~/ROOT.war icdc_demo:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + // stage('schema'){ + // environment { + // NEO4J_IP = "${NEO4J_IP}" + // } + // steps { + // withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + // sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + // } + // } + // } + } + post { + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/icdc-demo/jenkinsoldfile b/jenkins/jobs/icdc-demo/jenkinsoldfile new file mode 100644 index 000000000..c92d023be --- /dev/null +++ b/jenkins/jobs/icdc-demo/jenkinsoldfile @@ -0,0 +1,175 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "CTDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc-demo devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'icdc-demo-docker-maven' + } + } + parameters { + 
//listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'demo', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'demo,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + checkout([$class: 'GitSCM', branches: [[name: "${params.Tag}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + } + } + stage('Set Environment'){ + environment { + DEMO_URL = "caninecommons-demo.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "demo": + withCredentials([file(credentialsId: 'demo_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEMO_URL}/,g' application_example.properties" + env.TOMCAT01_IP = 
"${TOMCAT01_DEMO_IP}" + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + } + break + default: + withCredentials([file(credentialsId: 'demo_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEMO_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEMO_IP}" + env.NEO4J_IP = "${NEO4J_DEMO_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + sh "cd ${WORKSPACE}/src/main/resources/ && mv application_example.properties application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/localhost/${NEO4J_IP}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/Basic 123456/${BEARER}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/password=${NEO4J_IP}/password=${NEO4J_PASSWORD}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/neo4j_username/${NEO4J_USER}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accessid/${FENCE_ID}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accesskey/${FENCE_CREDENTIAL}/g' application.properties" + + sh "mvn package -DskipTests" + + sh "mv target/ICDC-0.0.1.war target/ROOT.war" + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + 
ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd +ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f icdc_demo +cd /local/content/docker +sudo docker pull cbiitssrepo/tomcat9 +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp ~/ROOT.war icdc_demo:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + } + } + } + } + post { + always { + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/icdc-demo/sumo_migration_steps b/jenkins/jobs/icdc-demo/sumo_migration_steps new file mode 100644 index 000000000..b2afe2992 --- /dev/null +++ b/jenkins/jobs/icdc-demo/sumo_migration_steps @@ -0,0 +1,7 @@ +systemctl stop collector +modify user.properties +remove creds directory +systemctl start collector +suErEZJ2nz4NoD +twkLYfsrGWHrD2TLcmuSZBVnM0RvCzalDPoJKwSFtdJaZ1xQC3Lo3f6zOjaI74uO +https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Docker_Apps/Docker/01-Collect-Logs-and-Metrics-from-Docker#Step_4:_Add_a_Docker_stats_source
 \ No newline at end of file diff --git a/jenkins/jobs/icdc-file-downloader/Jenkinsfile b/jenkins/jobs/icdc-file-downloader/Jenkinsfile new file mode 100644 index 000000000..61ed39e2a --- /dev/null +++ b/jenkins/jobs/icdc-file-downloader/Jenkinsfile @@ -0,0 +1,190 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${FILES_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Downloader_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-files') + + } + + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Downloader_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + 
relativeTargetDir: 'bento-files']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-files']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('build'){ + environment { + DOWNLOADER_VERSION = "${params.Downloader_Tag}-${BUILD_NUMBER}" + TIER = "${params.Environment}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([ + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ansible_host_file', variable: 'host_file'), + string(credentialsId: 'docker_host', variable: 'DOCKER_HOST'), + string(credentialsId: 'tls_hostname', variable: 'TLS_HOSTNAME')]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-icdc-file-downloader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}" + ], + colorized: true) + } + } + } + } + + stage('deploy'){ + environment { + DOWNLOADER_VERSION = "${params.Downloader_Tag}-${BUILD_NUMBER}" + FILES_SLACK_URL = "${FILES_SLACK_URL}" + TIER = "${params.Environment}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([ + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + file(credentialsId: 'cloudone_cloudfront_private_key', variable: 'cloudfront_key_file'), + string(credentialsId: "${params.Environment}_cloudfront_key_group_id", variable: 'CLOUDFRONT_KEY_GROUP_ID'), + 
string(credentialsId: "${params.Environment}_cloudfront_domain_name", variable: 'CLOUDFRONT_DOMAIN_NAME'), + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER') + ]) + { + script { + env.CLOUDFRONT_PRIVATE_KEY = readFile "${cloudfront_key_file}" + } + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc-file-downloader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + //indexd_url: "${params.IndexD_Url}", + tier: "${params.Environment}", + cloudfront_private_key: "${CLOUDFRONT_PRIVATE_KEY}", + cloudfront_domain_name: "${CLOUDFRONT_DOMAIN_NAME}", + cloudfront_key_group_id: "${CLOUDFRONT_KEY_GROUP_ID}" + ], + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + + } + + success { + + script { + withCredentials([string(credentialsId: 'git_credential_token', variable: 'git_token')]) { + sh label: 'GIT-Tag Downloader', script: '''#!/bin/bash + cd ${WORKSPACE}/bento-files + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Downloader_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Downloader_Tag}-icdc-${BUILD_NUMBER} -m "Jenkins tag: ${Downloader_Tag}-${BUILD_NUMBER}" + git push https://${git_token}:x-oauth-basic@${gitURL} --tags + + ''' + + } + } + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc-file-downloader/Jenkinsfile_deploy b/jenkins/jobs/icdc-file-downloader/Jenkinsfile_deploy new file mode 100644 index 000000000..8b261a101 --- /dev/null +++ b/jenkins/jobs/icdc-file-downloader/Jenkinsfile_deploy @@ -0,0 +1,127 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 
'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${FILES_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod') + //string( + //defaultValue: "https://nci-crdc.datacommons.io/user/data/download/dg.4DFC", + //description: 'Provide indexd url', + //name: 'IndexD_Url') + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('deploy'){ + environment { + FILES_SLACK_URL = "${FILES_SLACK_URL}" + TIER = "${params.Environment}" + DOWNLOADER_VERSION = "${params.Downloader_Tag}" + } + steps{ + node('commons-docker-ncias-p2236-v') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 
'DOCKER_USER'), + string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY'), + string(credentialsId: "${params.Environment}_cloudfront_domain_name", variable: 'CLOUDFRONT_DOMAIN_NAME'), + string(credentialsId: "${params.Environment}_cloudfront_key_group_id", variable: 'CLOUDFRONT_KEY_GROUP_ID'), + string(credentialsId: 'sumo_access_id', variable: 'SUMO_ACCESS_ID'), + string(credentialsId: 'sumo_access_key', variable: 'SUMO_ACCESS_KEY'), + string(credentialsId: 'bento_syslog_host', variable: 'SYSLOG_HOST'), + string(credentialsId: "${params.Environment}_cloudfront_key_group_id", variable: 'CLOUDFRONT_KEY_GROUP_ID'), + file(credentialsId: 'cloudone_cloudfront_private_key', variable: 'cloudfront_key_file'), + file(credentialsId: 'ansible_host_file', variable: 'host_file')]) + { + sh "cp ${host_file} ${WORKSPACE}/icdc-devops/ansible/hosts" + script { + env.CLOUDFRONT_PRIVATE_KEY = readFile "${cloudfront_key_file}" + } + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc-file-downloader.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + //indexd_url: "${params.IndexD_Url}", + cloudfront_private_key: "${CLOUDFRONT_PRIVATE_KEY}", + cloudfront_domain_name: "${CLOUDFRONT_DOMAIN_NAME}", + cloudfront_key_group_id: "${CLOUDFRONT_KEY_GROUP_ID}" + ], + colorized: true) + } + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc-pipeline/Jenkinsfile b/jenkins/jobs/icdc-pipeline/Jenkinsfile new file mode 100644 index 000000000..05f489091 --- /dev/null +++ b/jenkins/jobs/icdc-pipeline/Jenkinsfile @@ -0,0 +1,228 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = 
['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'docker-maven' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'Develop', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'NONE', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + 
doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + PROD_URL = "caninecommons.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + } + break + case "dev": + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + case "qa": + withCredentials([file(credentialsId: 'qa_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 
's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${QA_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_QA_IP}" + env.TOMCAT02_IP = "${TOMCAT02_QA_IP}" + env.NEO4J_IP = "${NEO4J_QA_IP}" + } + break + case "stage": + withCredentials([file(credentialsId: 'stage_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${STAGE_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_STAGE_IP}" + env.TOMCAT02_IP = "${TOMCAT02_STAGE_IP}" + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + } + break + case "prod": + withCredentials([file(credentialsId: 'prod_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${PROD_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_PROD_IP}" + env.TOMCAT02_IP = "${TOMCAT02_PROD_IP}" + env.NEO4J_IP = "${NEO4J_PROD_IP}" + } + break + default: + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 
'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +sh label: '', script: ''' + +for server in $TOMCAT02_IP $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +popd +ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +sudo docker rm -f k9dc +cd /local/content/docker +sudo docker pull cbiitssrepo/bento-backend:release +sudo docker-compose up -d & +wait %1 +sleep 20 +docker cp -a ~/ROOT.war k9dc:/usr/local/tomcat/webapps +rm -rf ~/ROOT.war +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/icdc-pipeline/pipeline.groovy b/jenkins/jobs/icdc-pipeline/pipeline.groovy new file mode 100644 index 000000000..9e5cd7b89 --- /dev/null +++ b/jenkins/jobs/icdc-pipeline/pipeline.groovy @@ 
-0,0 +1,18 @@ +pipelineJob('icdc/k9dc') { + + def repo = 'https://github.com/vdonkor/ctn.git' + description("ICDC pipeline Job") + + definition { + cpsScm { + scm { + git { + remote { url(repo) } + branches('master', '**/feature*') + scriptPath('jobs/icdc/Jenkinsfile') + extensions { } // required to avoid tagging + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc/Jenkinsfile b/jenkins/jobs/icdc/Jenkinsfile new file mode 100644 index 000000000..e45e03c7b --- /dev/null +++ b/jenkins/jobs/icdc/Jenkinsfile @@ -0,0 +1,243 @@ + //load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "ICDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "icdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'icdc_maven' + } + } + parameters { + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/icdc-codebase') + extendedChoice( + name: 'Environment', + defaultValue: 'sandbox', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 
'sandbox,dev,QA,stage,prod' ) + + } + triggers { + pollSCM('H/15 * * * 1-5') + // cron('5 * * * 1-5') + } + options { + timestamps() + } + tools { + maven 'maven-3.6.1' + jdk 'jdk11' + } + stages { + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-codebase']]]) + + dir('icdc-devops'){ + git branch: 'master', + url: 'https://github.com/CBIIT/icdc-devops.git' + } + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "caninecommons-dev.cancer.gov" + QA_URL = "caninecommons-qa.cancer.gov" + STAGE_URL = "caninecommons-stage.cancer.gov" + PROD_URL = "caninecommons.cancer.gov" + VERSION = "${params.Tag}" + + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cat ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + } + break + case "dev": + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's/caninecommons-dev.cancer.gov/${ DEV_URL}/g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + break + case "qa": + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's/caninecommons-dev.cancer.gov/${ QA_URL}/g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${QA_URL}/,g' application_example.properties" + break + case "stage": + sh "cd 
${WORKSPACE}/src/main/frontend/ && sed -i 's/caninecommons-dev.cancer.gov/${ STAGE_URL}/g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${STAGE_URL}/,g' application_example.properties" + break + case "prod": + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's/caninecommons-dev.cancer.gov/${PROD_URL}/g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${PROD_URL}/,g' application_example.properties" + break + default: + println "Select valid option" + break + } + } + } + } + stage('Build') { + environment { + TOMCAT01_IP = "${TOMCAT01_IP}" + TOMCAT02_IP = "${TOMCAT02_IP}" + SLACK_URL = "${SLACK_URL}" + NEO4J_IP = "${NEO4J_IP}" + NODE_OPTIONS = "--max-old-space-size=8000" + + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-icdc.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('Deploy') { + when { + expression { + currentBuild.result == null || currentBuild.result == 'SUCCESS' + } + } + steps { + ansiColor('xterm') { + withCredentials([sshUserPrivateKey(credentialsId: 'deployer_ssh_key', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'centos')]) { +sh label: '', script: ''' + +for server in $TOMCAT02_IP $TOMCAT01_IP; +do +pushd target +scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $centos@${server}:/tmp +popd +ssh -i $deployer_key -T -o StrictHostKeyChecking=no $centos@${server} << 
EOF +sudo docker rm -f k9dc +cd /local/content/docker +sudo docker pull cbiitssrepo/tomcat9 +sudo docker-compose up -d & +wait %1 +sleep 20 +sudo docker cp /tmp/ROOT.war k9dc:/usr/local/tomcat/webapps +EOF +done''' + } + } + } + } + stage('schema'){ + environment { + NEO4J_IP = "${NEO4J_IP}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + } + } + } + } + post { + always { + + sendSlackMessage() + } + success { + + script { + withCredentials([usernamePassword(credentialsId: 'git_credential', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { + sh label: 'GIT-Tag Backend', script: '''#!/bin/bash + + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Backend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Backend_Tag}-icdc-${BUILD_NUMBER} -m "Jenkins tag: ${Backend_Tag}-${BUILD_NUMBER}" + git push https://${git_user}:${git_password}@${gitURL} --tags + + ''' + + } + } + + script { + withCredentials([usernamePassword(credentialsId: 'git_credential', passwordVariable: 'git_password', usernameVariable: 'git_user')]) { + sh label: 'GIT-Tag Frontend', script: '''#!/bin/bash + + cd ${WORKSPACE}/bento-frontend + gitURL=$(git config remote.origin.url | sed 's|^.*//||') + echo "Applying tag $Frontend_Tag to URL: $gitURL" + git config user.email "jenkins@bento-tools.org" + git config user.name "Bento Jenkins" + git tag --no-sign -a ${Frontend_Tag}-${BUILD_NUMBER} -m "Jenkins tag: ${Frontend_Tag}-${BUILD_NUMBER}" + git push https://${git_user}:${git_password}@${gitURL} --tags + + ''' + + } + } + + } + cleanup { + + cleanWs() + + } + + + } +} \ No newline at end of file diff --git a/jenkins/jobs/icdc/pipeline.groovy 
b/jenkins/jobs/icdc/pipeline.groovy new file mode 100644 index 000000000..9e5cd7b89 --- /dev/null +++ b/jenkins/jobs/icdc/pipeline.groovy @@ -0,0 +1,18 @@ +pipelineJob('icdc/k9dc') { + + def repo = 'https://github.com/vdonkor/ctn.git' + description("ICDC pipeline Job") + + definition { + cpsScm { + scm { + git { + remote { url(repo) } + branches('master', '**/feature*') + scriptPath('jobs/icdc/Jenkinsfile') + extensions { } // required to avoid tagging + } + } + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/ins/Jenkinsfile b/jenkins/jobs/ins/Jenkinsfile new file mode 100644 index 000000000..e3d3949ac --- /dev/null +++ b/jenkins/jobs/ins/Jenkinsfile @@ -0,0 +1,299 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1' ) + gitParameter(branchFilter: 
'origin/(.*)', + defaultValue: 'main', + name: 'Backend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: ' https://github.com/CBIIT/INS-WebService') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'main', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: ' https://github.com/CBIIT/INS-WebPortal') + string( + defaultValue: 'ins', + description: 'Project Name', + name: 'ProjectName') + booleanParam( + defaultValue: true, + description: 'Use the Redis cache', + name: 'Use_Redis') + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + string( + defaultValue: 'redis-schema.graphql', + description: 'The file to use when loading redis schema', + name: 'Redis_Schema_File') + string( + defaultValue: 'redis-filter-config.bento.yaml', + description: 'The file to use when loading redis queries', + name: 'Redis_Queries_File') + string( + defaultValue: 'test-queries.1k.bento.yaml', + description: 'The file to use when loading test queries', + name: 'Test_Queries_File') + string( + defaultValue: '', + description: 'The commit id to checkout for the frontend', + name: 'fe_commit_id') + string( + defaultValue: '', + description: 'The commit id to checkout for the backend', + name: 'be_commit_id') + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/INS-WebService']]]) + + checkout([$class: 'GitSCM', + 
branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ins-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/INS-WebPortal']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'ins_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'ins_dev_bearer', variable: 'BEARER')]) { + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.BEARER = "${BEARER}" + + } + break + default: + withCredentials([usernamePassword(credentialsId: 'ins_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.BEARER = "${BEARER}" + + } + break + } + } + } + } + + stage('build'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}" + BE_VERSION = "${params.Backend_Tag}" + } + steps { + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-build.yml', + inventory: 
'${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: 'ins', + redis_schema_file: "${params.Redis_Schema_File}", + redis_init_queries_file: "${params.Redis_Queries_File}", + test_queries_file: "${params.Test_Queries_File}", + frontend_commit_id: "${params.fe_commit_id}", + backend_commit_id: "${params.be_commit_id}", + ], + colorized: true) + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + VERSION = "${params.Frontend_Tag}" + FE_VERSION = "${params.Frontend_Tag}-${BUILD_NUMBER}" + BE_VERSION = "${params.Backend_Tag}-${BUILD_NUMBER}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: 
'${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + project: 'ins', + enable_redis: "${params.Use_Redis}", + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + + + } + post { + + always { + + sendSlackMessage() + + } + + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/ins/Jenkinsfile_deploy b/jenkins/jobs/ins/Jenkinsfile_deploy new file mode 100644 index 000000000..d34cc0369 --- /dev/null +++ b/jenkins/jobs/ins/Jenkinsfile_deploy @@ -0,0 +1,230 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: true, + description: 'Flush the Redis 
cache', + name: 'Flush_Redis') + booleanParam( + defaultValue: true, + description: 'Enable the Redis filter', + name: 'Enable_Redis_Filter') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + VERSION = "${params.Frontend_Image}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'ins_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ins_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_INS_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "qa": + withCredentials([string(credentialsId: 'ins_qa_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ins_qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_INS_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "perf": + withCredentials([string(credentialsId: 'ins_stage_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ins_stage_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_INS_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + case "prod": + withCredentials([string(credentialsId: 'ins_prod_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ins_prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_INS_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + 
env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + + default: + withCredentials([string(credentialsId: 'ins_dev_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'ins_dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.NEO4J_IP = "${NEO4J_INS_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + env.NEO4J_USER = "${NEO4J_USER}" + env.NEO4J_PASS = "${NEO4J_PASS}" + + } + break + } + } + } + } + + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + + stage('deploy'){ + environment { + SLACK_URL = "${SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Image}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + } + steps{ + node('cicd_microservice') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] 
+ ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/deploy-bento-ins.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + enable_redis_filter: "${params.Enable_Redis_Filter}", + ], + colorized: true) + } + } + } + } + + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/katalon/Jenkinsfile b/jenkins/jobs/katalon/Jenkinsfile new file mode 100644 index 000000000..0c5adfaca --- /dev/null +++ b/jenkins/jobs/katalon/Jenkinsfile @@ -0,0 +1,424 @@ +//node('docker-katalon-ch') { +// parameters { +// extendedChoice( +// name: 'Browser', +// defaultValue: 'Chrome', +// description: 'Choose the browser (headless) to use', +// type: 'PT_SINGLE_SELECT', +// value: 'Chrome,Firefox' ) +// } +// stage('set agent'){ +// if (params.Browser == 'Firefox') { +// AGENT_LABEL = "docker-katalon-ff" +// } else { +// AGENT_LABEL = "docker-katalon-ch" +// } +// } +// } + +pipeline { + agent { + node { + //label "${AGENT_LABEL}" + label "docker-katalon-ch" + } + } + environment { + katalonVer = '7.2.6' + chromedriverVer = '83.0.4103.39' + geckodriverVer = '0.26.0' + } + parameters { + + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/Commons_Automation') + + string(defaultValue: "Commons_Automation.prj", + description: 'Enter the Katalon Project file (include the path relative to the repo root):', + name: 'KatalonPrj') + + string(defaultValue: "Test Suites/Canine_TestSuite", + description: 'Enter the Katalon Suite Path (not including the test suite file):', + name: 'KatalonSuite') + + extendedChoice( + name: 'Browser', + defaultValue: 'Chrome', + description: 'Choose the browser 
(headless) to use', + type: 'PT_SINGLE_SELECT', + value: 'Chrome,Firefox' ) + + extendedChoice( + name: 'Profile', + defaultValue: 'QA_ICDC', + description: 'Choose the profile to use', + type: 'PT_SINGLE_SELECT', + value: 'DEV_ICDC,QA_ICDC,STAGE_ICDC,PROD_ICDC,QA_CTDC' ) + + string(defaultValue: "gayathri.radhakrishnan@nih.gov,laxmi.lolla@nih.gov,sohil.sohil@nih.gov", + description: 'Enter a list of email addresses to notify in case of test failures:', + name: 'EmailRecipients') + + } + // options { + // ansiColor('xterm') + // } + tools { + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [], + submoduleCfg: [], + userRemoteConfigs: [[url: 'https://github.com/CBIIT/Commons_Automation']]]) + + } + } + stage('set Profile'){ + environment { + KATALON_PRJ = "${params.KatalonPrj}" + } + steps { + script { + switch("${params.Profile}") { + case "DEV_ICDC": + WIKI_PAGE="289178264" + PROJECT="ICDC" + TIER="DEV" + withCredentials([string(credentialsId: 'Box_Email_DEV_ICDC', variable: 'box_email'), + string(credentialsId: 'Box_Url_DEV_ICDC', variable: 'box_url'), + file(credentialsId: 'Katalon_Dev_ICDC', variable: 'pass_file')]) { + BOX_URL="${box_url}" + BOX_EMAIL="${box_email}" + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + case "QA_ICDC": + WIKI_PAGE="289178264" + PROJECT="ICDC" + TIER="QA" + withCredentials([string(credentialsId: 'Box_Email_QA_ICDC', variable: 'box_email'), + string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url'), + file(credentialsId: 'Katalon_QA_ICDC', variable: 'pass_file')]) { + BOX_URL="${box_url}" + BOX_EMAIL="${box_email}" + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + case "STAGE_ICDC": + WIKI_PAGE="289178264" + PROJECT="ICDC" + TIER="STAGE" + withCredentials([string(credentialsId: 'Box_Email_STAGE_ICDC', variable: 
'box_email'), + string(credentialsId: 'Box_Url_STAGE_ICDC', variable: 'box_url'), + file(credentialsId: 'Katalon_STAGE_ICDC', variable: 'pass_file')]) { + BOX_URL="${box_url}" + BOX_EMAIL="${box_email}" + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + case "PROD_ICDC": + WIKI_PAGE="289178264" + PROJECT="ICDC" + TIER="PROD" + withCredentials([string(credentialsId: 'Box_Email_PROD_ICDC', variable: 'box_email'), + string(credentialsId: 'Box_Url_PROD_ICDC', variable: 'box_url'), + file(credentialsId: 'Katalon_PROD_ICDC', variable: 'pass_file')]) { + BOX_URL="${box_url}" + BOX_EMAIL="${box_email}" + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + case "QA_CTDC": + WIKI_PAGE="289178298" + PROJECT="CTDC" + TIER="QA" + withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email'), + string(credentialsId: 'Box_Url_QA_CTDC', variable: 'box_url'), + file(credentialsId: 'Katalon_QA_CTDC', variable: 'pass_file')]) { + BOX_URL="${box_url}" + BOX_EMAIL="${box_email}" + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + } + } + } + } + stage('run tests'){ + environment { + KATALON_BROWSER = "${params.Browser}" + KATALON_PROFILE = "${params.Profile}" + KATALON_PRJ = "${params.KatalonPrj}" + KATALON_SUITE_PATH = "${params.KatalonSuite}" + PROFILE_FILE = "${PROFILE}" + } + steps { + + script { + withCredentials([string(credentialsId: 'Katalon_API_Key', variable: 'api_key'), + string(credentialsId: 'Katalon_Org_ID', variable: 'org_id')]) { + + sh label: 'Katalon-Tests', script: '''#!/bin/bash + + # Set datestamp for results file + dateStamp=$(date +%Y%m%d) + reportFile="${KATALON_PROFILE}_${dateStamp}_build_${BUILD_NUMBER}" + + # Recreate the results directory + rm -rf results && mkdir results + + # Create the output files directory (required for writing excel files) + rm -rf OutputFiles && mkdir OutputFiles + + # Update profile filename + profile_file=$(basename 
$PROFILE_FILE) + profile_name="${profile_file%.*}" + + #echo "projectPath=$WORKSPACE/$KATALON_PRJ testSuitePath=$KATALON_SUITE_PATH executionProfile=$profile_name filename=$profile_file" + + # Run Katalon Tests + katalonc -noSplash -runMode=console --config -webui.autoUpdateDrivers=true -projectPath="$WORKSPACE/$KATALON_PRJ" -retry=0 -testSuitePath="$KATALON_SUITE_PATH" -executionProfile="$profile_name" -browserType="$KATALON_BROWSER (headless)" -reportFolder="results" -reportFileName="$reportFile" -apiKey="$api_key" -orgID="$org_id" + + ''' + + } + } + + } + } + } + post { + always { + + publishHTML([allowMissing: true, + alwaysLinkToLastBuild: false, + keepAll: false, + reportDir: 'results', + reportFiles: '*.html', + reportName: 'HTML Report', + reportTitles: '']) + + script { + + sh label: 'Zip-Katalon-Results', script: '''#!/bin/bash + + apt-get update && apt-get install -y zip + + resultsFile=$(basename results/*.html) + zipFile=$(basename -s .html results/*.html) + + zip -r results/$zipFile.zip OutputFiles + zip -u results/$zipFile.zip results/$resultsFile + + ''' + + } + + emailext(attachmentsPattern: 'results/*.zip', + body: 'Katalon Test Results', + subject: 'Katalon Test Results - Box', + to: "${BOX_EMAIL}") + + script { + + withCredentials([usernamePassword(credentialsId: 'Katalon_wiki_results', passwordVariable: 'passwd', usernameVariable: 'user')]) { + + sh label: 'Katalon-Results-Confluence', script: '''#!/bin/bash + + inputFile=$(exec find $WORKSPACE/results -type f -name "*.xml") + resultsFile=$(basename results/*.zip) + project=''' + PROJECT + ''' + tier=''' + TIER + ''' + pageID=''' + WIKI_PAGE + ''' + pageName="$project Automated Test Results" + creds="\'$user:$passwd\'" + + # get updated page text + totalTestCases="$(grep -c '

} + emailBody=${emailBody//\\\\n/
} + emailBody="$emailBody


The results of this test run can be found in Box: $resultsFile" + echo $emailBody + + ''', + returnStdout: true).trim() + + } + + emailext(attachmentsPattern: 'results/*.zip', + mimeType: 'text/html', + body: "${EMAIL_BODY}", + subject: 'Katalon Tests: results attached', + to: "${EmailRecipients}") + + + } + + failure { + + script { + + ERROR_OUTPUT = sh (label: 'Katalon-Results-Parsing', script: '''#!/bin/bash + + project=''' + PROJECT + ''' + tier=''' + TIER + ''' + inputFile=$(exec find $WORKSPACE/results -type f -name "*.xml") + + # Get test stats + totalTestCases="$(grep -c '

} + emailBody=${emailBody//\\\\n/
} + emailBody="$emailBody


The results of this test run can be found in Box: $resultsFile" + echo $emailBody + + ''', + returnStdout: true).trim() + + } + + emailext(attachmentsPattern: 'results/*.zip', + mimeType: 'text/html', + body: "${EMAIL_BODY}", + subject: 'Katalon Tests: results attached', + to: "${EmailRecipients}") + + + } + + failure { + + script { + + ERROR_OUTPUT = sh (label: 'Katalon-Results-Parsing', script: '''#!/bin/bash + + project=''' + PROJECT + ''' + tier=''' + TIER + ''' + inputFile=$(exec find $WORKSPACE/results -type f -name "*.xml") + + # Get test stats + totalTestCases="$(grep -c '

} + // emailBody=${emailBody//\\\\n/
} + // emailBody="$emailBody


The results of this test run can be found in Box: $resultsFile" + // echo $emailBody + + // ''', + // returnStdout: true).trim() + + // } + + // emailext(attachmentsPattern: 'results/*.html', + // mimeType: 'text/html', + // body: "${EMAIL_BODY}", + // subject: 'Failed Katalon Tests: results attached', + // to: "${EmailRecipients}") + + // } + + } + } \ No newline at end of file diff --git a/jenkins/jobs/katalon/Jenkinsfile_Bento_dev_ctdc b/jenkins/jobs/katalon/Jenkinsfile_Bento_dev_ctdc new file mode 100644 index 000000000..c37055521 --- /dev/null +++ b/jenkins/jobs/katalon/Jenkinsfile_Bento_dev_ctdc @@ -0,0 +1,374 @@ +//node('cicd_microservice') { +// parameters { +// extendedChoice( +// name: 'Browser', +// defaultValue: 'Firefox', +// description: 'Choose the browser (headless) to use', +// type: 'PT_SINGLE_SELECT', +// value: 'Chrome,Firefox' ) +// } +// stage('set agent'){ +// if (params.Browser == 'Firefox') { +// AGENT_LABEL = "docker-katalon-ff" +// } else { +// AGENT_LABEL = "docker-katalon-ch" +// } +// } +// } + +pipeline { + agent { + node { + //label "${AGENT_LABEL}" + label "docker-katalon-ch" + } + } + + environment { + katalonVer = '7.2.6' + chromedriverVer = '83.0.4103.39' + geckodriverVer = '0.26.0' + } + parameters { + + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/Commons_Automation') + + string(defaultValue: "Commons_Automation.prj", + description: 'Enter the Katalon Project file (include the path relative to the repo root):', + name: 'KatalonPrj') + + string(defaultValue: "Test Suites/Bento_TestSuite", + description: 'Enter the Katalon Suite Path (not including the test suite file):', + name: 'KatalonSuite') + + extendedChoice( + name: 'Browser', + defaultValue: 'Chrome', + description: 'Choose the browser (headless) to use', + type: 
'PT_SINGLE_SELECT', + value: 'Chrome,Firefox' ) + + extendedChoice( + name: 'Profile', + defaultValue: 'Katalon_DEV_CTDC', + description: 'Choose the profile to use', + type: 'PT_SINGLE_SELECT', + value: 'Katalon_DEV_CTDC' ) + + string(defaultValue: "gayathri.radhakrishnan@nih.gov,sohil.sohil@nih.gov", + description: 'Enter a list of email addresses to notify in case of test failures:', + name: 'EmailRecipients') + + } + // options { + // ansiColor('xterm') + // } + tools { + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [], + submoduleCfg: [], + userRemoteConfigs: [[url: 'https://github.com/CBIIT/Commons_Automation']]]) + + //git branch: "${params.Tag}", + // changelog: false, + // poll: false, + // url: 'https://github.com/CBIIT/Commons_Automation' + + //dir('icdc-devops'){ + // git branch: 'master', + // url: 'https://github.com/CBIIT/icdc-devops.git'} + + } + } + stage('set Profile'){ + environment { + KATALON_PRJ = "${params.KatalonPrj}" + } + steps { + script { + switch("${params.Profile}") { + //case "QA_ICDC": + // WIKI_PAGE="434110502" + // WIKI_NAME="ICDC" + // withCredentials([string(credentialsId: 'Box_Email_QA_ICDC', variable: 'box_email')]) { + // BOX_EMAIL="${box_email}" + // } + // withCredentials([string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url')]) { + // BOX_URL="${box_url}" + // } + // withCredentials([file(credentialsId: 'Katalon_QA_ICDC', variable: 'pass_file')]) { + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + //case "QA_CTDC": + // WIKI_PAGE="434110839" + // WIKI_NAME="CTDC" + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + // BOX_EMAIL="${box_email}" + // } + // withCredentials([string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url')]) { + // BOX_URL="${box_url}" + // } + // 
withCredentials([file(credentialsId: 'Katalon_QA_CTDC', variable: 'pass_file')]) { + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + case "Katalon_DEV_CTDC": + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + BOX_EMAIL="${EmailRecipients}" + // } + withCredentials([file(credentialsId: 'Katalon_DEV_CTDC', variable: 'pass_file')]) { + PROFILE="${pass_file}" + //sh "mkdir ${WORKSPACE}/Profiles && cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + //sh "mkdir ${WORKSPACE}/Profiles && cp ${pass_file} ${WORKSPACE}/Profiles/" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + //case "Katalon_Dev_CTDC": + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + // BOX_EMAIL="${EmailRecipients}" + // } + // withCredentials([file(credentialsId: 'Katalon_Dev_CTDC', variable: 'pass_file')]) { + // PROFILE="${pass_file}" + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + } + } + } + } + stage('run tests'){ + environment { + KATALON_BROWSER = "${params.Browser}" + KATALON_PROFILE = "${params.Profile}" + KATALON_PRJ = "${params.KatalonPrj}" + KATALON_SUITE_PATH = "${params.KatalonSuite}" + PROFILE_FILE = "${PROFILE}" + } + steps { + + script { + withCredentials([string(credentialsId: 'Katalon_API_Key', variable: 'api_key'), + string(credentialsId: 'Katalon_Org_ID', variable: 'org_id')]) { + + sh label: 'Katalon-Tests', script: '''#!/bin/bash + + # Set datestamp for results file + dateStamp=$(date +%Y%m%d) + reportFile="${KATALON_PROFILE}_${dateStamp}_build_${BUILD_NUMBER}" + + # Recreate the results directory + rm -rf results && mkdir results + + # Create the output files directory (required for writing excel files) + rm -rf OutputFiles && mkdir OutputFiles + + # Update profile filename + profile_file=$(basename $PROFILE_FILE) + profile_name="${profile_file%.*}" + #cp Profiles/$KATALON_PROFILE.glbl 
Profiles/$profile_file + + #echo "projectPath=$KATALON_PRJ testSuitePath=$KATALON_SUITE_PATH executionProfile=$profile_name filename=$profile_file" + + # Run Katalon Tests + katalonc -noSplash -runMode=console --config -webui.autoUpdateDrivers=true -projectPath="$WORKSPACE/$KATALON_PRJ" -retry=0 -testSuitePath="$KATALON_SUITE_PATH" -executionProfile="$profile_name" -browserType="$KATALON_BROWSER (headless)" -reportFolder="results" -reportFileName="$reportFile" -apiKey="$api_key" -orgID="$org_id" + + ''' + + } + } + + } + } + } + post { + always { + + publishHTML([allowMissing: true, + alwaysLinkToLastBuild: false, + keepAll: false, + reportDir: 'results', + reportFiles: '*.html', + reportName: 'HTML Report', + reportTitles: '']) + + script { + + sh label: 'Zip-Katalon-Results', script: '''#!/bin/bash + + apt-get update && apt-get install -y zip + + resultsFile=$(basename results/*.html) + zipFile=$(basename -s .html results/*.html) + + zip -r results/$zipFile.zip OutputFiles + zip -u results/$zipFile.zip results/$resultsFile + + ''' + + } + + //emailext(attachmentsPattern: 'results/*.html', + emailext(attachmentsPattern: 'results/*.zip', + body: 'Katalon Test Results', + subject: 'Katalon Test Results', + to: "${BOX_EMAIL}") + + //script { + + // withCredentials([usernamePassword(credentialsId: 'Katalon_wiki_results', passwordVariable: 'passwd', usernameVariable: 'user')]) { + + // sh label: 'Katalon-Results-Confluence', script: '''#!/bin/bash + + // inputFile=$(exec find $WORKSPACE/results -type f -name "*.xml") + // resultsFile=$(basename results/*.html) + // pageID=''' + WIKI_PAGE + ''' + // pageName=''' + WIKI_NAME + ''' + // pageName="$pageName Automated Test Results" + // creds="\'$user:$passwd\'" + + // # get updated page text + // totalTestCases="$(grep -c '

} + // emailBody=${emailBody//\\\\n/
} + // emailBody="$emailBody


The results of this test run can be found in Box: $resultsFile" + // echo $emailBody + + // ''', + // returnStdout: true).trim() + + // } + + // emailext(attachmentsPattern: 'results/*.html', + // mimeType: 'text/html', + // body: "${EMAIL_BODY}", + // subject: 'Failed Katalon Tests: results attached', + // to: "${EmailRecipients}") + + // } + + } + } \ No newline at end of file diff --git a/jenkins/jobs/katalon/Jenkinsfile_Bento_dev_icdc b/jenkins/jobs/katalon/Jenkinsfile_Bento_dev_icdc new file mode 100644 index 000000000..ed88ddbda --- /dev/null +++ b/jenkins/jobs/katalon/Jenkinsfile_Bento_dev_icdc @@ -0,0 +1,374 @@ +//node('cicd_microservice') { +// parameters { +// extendedChoice( +// name: 'Browser', +// defaultValue: 'Firefox', +// description: 'Choose the browser (headless) to use', +// type: 'PT_SINGLE_SELECT', +// value: 'Chrome,Firefox' ) +// } +// stage('set agent'){ +// if (params.Browser == 'Firefox') { +// AGENT_LABEL = "docker-katalon-ff" +// } else { +// AGENT_LABEL = "docker-katalon-ch" +// } +// } +// } + +pipeline { + agent { + node { + //label "${AGENT_LABEL}" + label "docker-katalon-ch" + } + } + + environment { + katalonVer = '7.2.6' + chromedriverVer = '83.0.4103.39' + geckodriverVer = '0.26.0' + } + parameters { + + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/Commons_Automation') + + string(defaultValue: "Commons_Automation.prj", + description: 'Enter the Katalon Project file (include the path relative to the repo root):', + name: 'KatalonPrj') + + string(defaultValue: "Test Suites/Bento_TestSuite", + description: 'Enter the Katalon Suite Path (not including the test suite file):', + name: 'KatalonSuite') + + extendedChoice( + name: 'Browser', + defaultValue: 'Chrome', + description: 'Choose the browser (headless) to use', + type: 
'PT_SINGLE_SELECT', + value: 'Chrome,Firefox' ) + + extendedChoice( + name: 'Profile', + defaultValue: 'Katalon_DEV', + description: 'Choose the profile to use', + type: 'PT_SINGLE_SELECT', + value: 'Katalon_DEV_ICDC' ) + + string(defaultValue: "gayathri.radhakrishnan@nih.gov,sohil.sohil@nih.gov", + description: 'Enter a list of email addresses to notify in case of test failures:', + name: 'EmailRecipients') + + } + // options { + // ansiColor('xterm') + // } + tools { + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [], + submoduleCfg: [], + userRemoteConfigs: [[url: 'https://github.com/CBIIT/Commons_Automation']]]) + + //git branch: "${params.Tag}", + // changelog: false, + // poll: false, + // url: 'https://github.com/CBIIT/Commons_Automation' + + //dir('icdc-devops'){ + // git branch: 'master', + // url: 'https://github.com/CBIIT/icdc-devops.git'} + + } + } + stage('set Profile'){ + environment { + KATALON_PRJ = "${params.KatalonPrj}" + } + steps { + script { + switch("${params.Profile}") { + //case "QA_ICDC": + // WIKI_PAGE="434110502" + // WIKI_NAME="ICDC" + // withCredentials([string(credentialsId: 'Box_Email_QA_ICDC', variable: 'box_email')]) { + // BOX_EMAIL="${box_email}" + // } + // withCredentials([string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url')]) { + // BOX_URL="${box_url}" + // } + // withCredentials([file(credentialsId: 'Katalon_QA_ICDC', variable: 'pass_file')]) { + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + //case "QA_CTDC": + // WIKI_PAGE="434110839" + // WIKI_NAME="CTDC" + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + // BOX_EMAIL="${box_email}" + // } + // withCredentials([string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url')]) { + // BOX_URL="${box_url}" + // } + // 
withCredentials([file(credentialsId: 'Katalon_QA_CTDC', variable: 'pass_file')]) { + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + case "Katalon_DEV_ICDC": + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + BOX_EMAIL="${EmailRecipients}" + // } + withCredentials([file(credentialsId: 'Katalon_DEV_ICDC', variable: 'pass_file')]) { + PROFILE="${pass_file}" + //sh "mkdir ${WORKSPACE}/Profiles && cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + //sh "mkdir ${WORKSPACE}/Profiles && cp ${pass_file} ${WORKSPACE}/Profiles/" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + //case "Katalon_Dev_CTDC": + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + // BOX_EMAIL="${EmailRecipients}" + // } + // withCredentials([file(credentialsId: 'Katalon_Dev_CTDC', variable: 'pass_file')]) { + // PROFILE="${pass_file}" + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + } + } + } + } + stage('run tests'){ + environment { + KATALON_BROWSER = "${params.Browser}" + KATALON_PROFILE = "${params.Profile}" + KATALON_PRJ = "${params.KatalonPrj}" + KATALON_SUITE_PATH = "${params.KatalonSuite}" + PROFILE_FILE = "${PROFILE}" + } + steps { + + script { + withCredentials([string(credentialsId: 'Katalon_API_Key', variable: 'api_key'), + string(credentialsId: 'Katalon_Org_ID', variable: 'org_id')]) { + + sh label: 'Katalon-Tests', script: '''#!/bin/bash + + # Set datestamp for results file + dateStamp=$(date +%Y%m%d) + reportFile="${KATALON_PROFILE}_${dateStamp}_build_${BUILD_NUMBER}" + + # Recreate the results directory + rm -rf results && mkdir results + + # Create the output files directory (required for writing excel files) + rm -rf OutputFiles && mkdir OutputFiles + + # Update profile filename + profile_file=$(basename $PROFILE_FILE) + profile_name="${profile_file%.*}" + #cp Profiles/$KATALON_PROFILE.glbl 
Profiles/$profile_file + + #echo "projectPath=$KATALON_PRJ testSuitePath=$KATALON_SUITE_PATH executionProfile=$profile_name filename=$profile_file" + + # Run Katalon Tests + katalonc -noSplash -runMode=console --config -webui.autoUpdateDrivers=true -projectPath="$WORKSPACE/$KATALON_PRJ" -retry=0 -testSuitePath="$KATALON_SUITE_PATH" -executionProfile="$profile_name" -browserType="$KATALON_BROWSER (headless)" -reportFolder="results" -reportFileName="$reportFile" -apiKey="$api_key" -orgID="$org_id" + + ''' + + } + } + + } + } + } + post { + always { + + publishHTML([allowMissing: true, + alwaysLinkToLastBuild: false, + keepAll: false, + reportDir: 'results', + reportFiles: '*.html', + reportName: 'HTML Report', + reportTitles: '']) + + script { + + sh label: 'Zip-Katalon-Results', script: '''#!/bin/bash + + apt-get update && apt-get install -y zip + + resultsFile=$(basename results/*.html) + zipFile=$(basename -s .html results/*.html) + + zip -r results/$zipFile.zip OutputFiles + zip -u results/$zipFile.zip results/$resultsFile + + ''' + + } + + //emailext(attachmentsPattern: 'results/*.html', + emailext(attachmentsPattern: 'results/*.zip', + body: 'Katalon Test Results', + subject: 'Katalon Test Results', + to: "${BOX_EMAIL}") + + //script { + + // withCredentials([usernamePassword(credentialsId: 'Katalon_wiki_results', passwordVariable: 'passwd', usernameVariable: 'user')]) { + + // sh label: 'Katalon-Results-Confluence', script: '''#!/bin/bash + + // inputFile=$(exec find $WORKSPACE/results -type f -name "*.xml") + // resultsFile=$(basename results/*.html) + // pageID=''' + WIKI_PAGE + ''' + // pageName=''' + WIKI_NAME + ''' + // pageName="$pageName Automated Test Results" + // creds="\'$user:$passwd\'" + + // # get updated page text + // totalTestCases="$(grep -c '

} + // emailBody=${emailBody//\\\\n/
} + // emailBody="$emailBody


The results of this test run can be found in Box: $resultsFile" + // echo $emailBody + + // ''', + // returnStdout: true).trim() + + // } + + // emailext(attachmentsPattern: 'results/*.html', + // mimeType: 'text/html', + // body: "${EMAIL_BODY}", + // subject: 'Failed Katalon Tests: results attached', + // to: "${EmailRecipients}") + + // } + + } + } \ No newline at end of file diff --git a/jenkins/jobs/katalon/Jenkinsfile_dev b/jenkins/jobs/katalon/Jenkinsfile_dev new file mode 100644 index 000000000..8f429397c --- /dev/null +++ b/jenkins/jobs/katalon/Jenkinsfile_dev @@ -0,0 +1,363 @@ +//node('docker-katalon-ch') { +// parameters { +// extendedChoice( +// name: 'Browser', +// defaultValue: 'Chrome', +// description: 'Choose the browser (headless) to use', +// type: 'PT_SINGLE_SELECT', +// value: 'Chrome,Firefox' ) +// } +// stage('set agent'){ +// if (params.Browser == 'Firefox') { +// AGENT_LABEL = "docker-katalon-ff" +// } else { +// AGENT_LABEL = "docker-katalon-ch" +// } +// } +// } + +pipeline { + agent { + node { + //label "${AGENT_LABEL}" + label "docker-katalon-ch" + } + } + + environment { + katalonVer = '7.2.6' + chromedriverVer = '83.0.4103.39' + geckodriverVer = '0.26.0' + } + parameters { + + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/Commons_Automation') + + string(defaultValue: "Commons_Automation.prj", + description: 'Enter the Katalon Project file (include the path relative to the repo root):', + name: 'KatalonPrj') + + string(defaultValue: "Test Suites/Canine_TestSuite", + description: 'Enter the Katalon Suite Path (not including the test suite file):', + name: 'KatalonSuite') + + extendedChoice( + name: 'Browser', + defaultValue: 'Chrome', + description: 'Choose the browser (headless) to use', + type: 'PT_SINGLE_SELECT', + value: 
'Chrome,Firefox' ) + + extendedChoice( + name: 'Profile', + defaultValue: 'Katalon_Dev_ICDC', + description: 'Choose the profile to use', + type: 'PT_SINGLE_SELECT', + value: 'Katalon_Dev_ICDC,Katalon_Dev_CTDC' ) + + string(defaultValue: "gayathri.radhakrishnan@nih.gov,sohil.sohil@nih.gov", + description: 'Enter a list of email addresses to notify in case of test failures:', + name: 'EmailRecipients') + + } + // options { + // ansiColor('xterm') + // } + tools { + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [], + submoduleCfg: [], + userRemoteConfigs: [[url: 'https://github.com/CBIIT/Commons_Automation']]]) + + } + } + stage('set Profile'){ + environment { + KATALON_PRJ = "${params.KatalonPrj}" + } + steps { + script { + switch("${params.Profile}") { + //case "QA_ICDC": + // WIKI_PAGE="434110502" + // WIKI_NAME="ICDC" + // withCredentials([string(credentialsId: 'Box_Email_QA_ICDC', variable: 'box_email')]) { + // BOX_EMAIL="${box_email}" + // } + // withCredentials([string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url')]) { + // BOX_URL="${box_url}" + // } + // withCredentials([file(credentialsId: 'Katalon_QA_ICDC', variable: 'pass_file')]) { + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + //case "QA_CTDC": + // WIKI_PAGE="434110839" + // WIKI_NAME="CTDC" + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + // BOX_EMAIL="${box_email}" + // } + // withCredentials([string(credentialsId: 'Box_Url_QA_ICDC', variable: 'box_url')]) { + // BOX_URL="${box_url}" + // } + // withCredentials([file(credentialsId: 'Katalon_QA_CTDC', variable: 'pass_file')]) { + // sh "cp ${pass_file} ${WORKSPACE}/Profiles/${params.Profile}.glbl" + // } + // break + case "Katalon_Dev_ICDC": + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 
'box_email')]) { + BOX_EMAIL="${EmailRecipients}" + // } + withCredentials([file(credentialsId: 'Katalon_Dev_ICDC', variable: 'pass_file')]) { + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + case "Katalon_Dev_CTDC": + // withCredentials([string(credentialsId: 'Box_Email_QA_CTDC', variable: 'box_email')]) { + BOX_EMAIL="${EmailRecipients}" + // } + withCredentials([file(credentialsId: 'Katalon_Dev_CTDC', variable: 'pass_file')]) { + PROFILE="${pass_file}" + sh "cp ${pass_file} ${WORKSPACE}/Profiles/" + } + break + } + } + } + } + stage('run tests'){ + environment { + KATALON_BROWSER = "${params.Browser}" + KATALON_PROFILE = "${params.Profile}" + KATALON_PRJ = "${params.KatalonPrj}" + KATALON_SUITE_PATH = "${params.KatalonSuite}" + PROFILE_FILE = "${PROFILE}" + } + steps { + + script { + withCredentials([string(credentialsId: 'Katalon_API_Key', variable: 'api_key'), + string(credentialsId: 'Katalon_Org_ID', variable: 'org_id')]) { + + sh label: 'Katalon-Tests', script: '''#!/bin/bash + + # Set datestamp for results file + dateStamp=$(date +%Y%m%d) + reportFile="${KATALON_PROFILE}_${dateStamp}_build_${BUILD_NUMBER}" + + # Recreate the results directory + rm -rf results && mkdir results + + # Create the output files directory (required for writing excel files) + rm -rf OutputFiles && mkdir OutputFiles + + # Update profile filename + profile_file=$(basename $PROFILE_FILE) + profile_name="${profile_file%.*}" + #cp Profiles/$KATALON_PROFILE.glbl Profiles/$profile_file + + #echo "projectPath=$KATALON_PRJ testSuitePath=$KATALON_SUITE_PATH executionProfile=$profile_name filename=$profile_file" + + # Run Katalon Tests + katalonc -noSplash -runMode=console --config -webui.autoUpdateDrivers=true -projectPath="$WORKSPACE/$KATALON_PRJ" -retry=0 -testSuitePath="$KATALON_SUITE_PATH" -executionProfile="$profile_name" -browserType="$KATALON_BROWSER (headless)" -reportFolder="results" -reportFileName="$reportFile" -apiKey="$api_key" 
-orgID="$org_id" + + ''' + + } + } + + } + } + } + post { + always { + + publishHTML([allowMissing: true, + alwaysLinkToLastBuild: false, + keepAll: false, + reportDir: 'results', + reportFiles: '*.html', + reportName: 'HTML Report', + reportTitles: '']) + + script { + + sh label: 'Zip-Katalon-Results', script: '''#!/bin/bash + + apt-get update && apt-get install -y zip + + resultsFile=$(basename results/*.html) + zipFile=$(basename -s .html results/*.html) + + zip -r results/$zipFile.zip OutputFiles + zip -u results/$zipFile.zip results/$resultsFile + + ''' + + } + + //emailext(attachmentsPattern: 'results/*.html', + emailext(attachmentsPattern: 'results/*.zip', + body: 'Katalon Test Results', + subject: 'Katalon Test Results', + to: "${BOX_EMAIL}") + + //script { + + // withCredentials([usernamePassword(credentialsId: 'Katalon_wiki_results', passwordVariable: 'passwd', usernameVariable: 'user')]) { + + // sh label: 'Katalon-Results-Confluence', script: '''#!/bin/bash + + // inputFile=$(exec find $WORKSPACE/results -type f -name "*.xml") + // resultsFile=$(basename results/*.html) + // pageID=''' + WIKI_PAGE + ''' + // pageName=''' + WIKI_NAME + ''' + // pageName="$pageName Automated Test Results" + // creds="\'$user:$passwd\'" + + // # get updated page text + // totalTestCases="$(grep -c '

} + // emailBody=${emailBody//\\\\n/
} + // emailBody="$emailBody


The results of this test run can be found in Box: $resultsFile" + // echo $emailBody + + // ''', + // returnStdout: true).trim() + + // } + + // emailext(attachmentsPattern: 'results/*.html', + // mimeType: 'text/html', + // body: "${EMAIL_BODY}", + // subject: 'Failed Katalon Tests: results attached', + // to: "${EmailRecipients}") + + // } + + } + } \ No newline at end of file diff --git a/jenkins/jobs/microservices/Jenkinsfile b/jenkins/jobs/microservices/Jenkinsfile new file mode 100644 index 000000000..b220d23ec --- /dev/null +++ b/jenkins/jobs/microservices/Jenkinsfile @@ -0,0 +1,238 @@ +//load shared library for slack notification +//@Library('shared-library')_ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "CTDC Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "ctdc devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + +pipeline { + + agent { + node { + label 'cicd_microservice' + } + } + parameters { + //listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + // gitParameter(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: 
false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/microservices') + extendedChoice( + name: 'Environment', + defaultValue: 'sandbox', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,qa,stage,prod' ) + } + // triggers { + // pollSCM('H/15 * * * 1-5') + // } + options { + timestamps() + } + tools { + maven 'Default' + jdk 'Default' + } + stages { + stage('Checkout') { + steps { + checkout([$class: 'GitSCM', branches: [[name: "${params.Tag}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/CBIIT/microservices']]]) + } + } + stage('Set Environment'){ + environment { + SANDBOX_URL = "k9dc.essential-dev.com" + DEV_URL = "trialcommons-dev.cancer.gov" + QA_URL = "trialcommons-qa.cancer.gov" + STAGE_URL = "trialcommons-stage.cancer.gov" + PROD_URL = "trialcommons.cancer.gov" + VERSION = "${params.Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "sandbox": + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + } + break + case "dev": + withCredentials([file(credentialsId: 'dev_env_file', variable: 'secret_file')]) { + sh "cp 
${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${DEV_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_DEV_IP}" + env.TOMCAT02_IP = "${TOMCAT02_DEV_IP}" + env.NEO4J_IP = "${NEO4J_DEV_IP}" + } + break + case "qa": + withCredentials([file(credentialsId: 'qa_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${QA_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_QA_IP}" + env.TOMCAT02_IP = "${TOMCAT02_QA_IP}" + env.NEO4J_IP = "${NEO4J_QA_IP}" + } + break + case "stage": + withCredentials([file(credentialsId: 'stage_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${STAGE_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_STAGE_IP}" + env.TOMCAT02_IP = "${TOMCAT02_STAGE_IP}" + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + } + break + case "prod": + withCredentials([file(credentialsId: 'prod_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${PROD_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_PROD_IP}" + env.TOMCAT02_IP = 
"${TOMCAT02_PROD_IP}" + env.NEO4J_IP = "${NEO4J_PROD_IP}" + } + break + default: + withCredentials([file(credentialsId: 'sandbox_env_file', variable: 'secret_file')]) { + sh "cp ${secret_file} ${WORKSPACE}/src/main/frontend/.env" + sh "cd ${WORKSPACE}/src/main/frontend/ && sed -i 's,tag_version,${VERSION},g' .env" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's,fence.redirect_url=https://localhost/,fence.redirect_url=https://${SANDBOX_URL}/,g' application_example.properties" + env.TOMCAT01_IP = "${TOMCAT01_IP}" + env.TOMCAT02_IP = "${TOMCAT02_IP}" + env.NEO4J_IP = "${NEO4J_IP}" + } + break + } + } + } + } + stage('Build') { + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), + usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER'), + string(credentialsId: 'fence-id', variable: 'FENCE_ID'), + usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + string(credentialsId: 'fence-credential', variable: 'FENCE_CREDENTIAL')]) { + sh "cd ${WORKSPACE}/src/main/resources/ && mv application_example.properties application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/localhost/${NEO4J_IP}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/Basic 123456/${BEARER}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/password=${NEO4J_IP}/password=${NEO4J_PASSWORD}/' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/neo4j_username/${NEO4J_USER}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accessid/${FENCE_ID}/g' application.properties" + sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/accesskey/${FENCE_CREDENTIAL}/g' application.properties" + + //run maven + sh """ + # mvn package -DskipTests + # mv target/ICDC-0.0.1.war 
target/ROOT.war + # docker login --username ${DOCKER_USER} --password ${DOCKER_PASSWORD} + # docker build -t cbiitssrepo/api -f ${WORKSPACE}/dockerfiles/backend-dockerfile . + # docker push cbiitssrepo/api + cd ${WORKSPACE}/src/main/frontend/ + #rm package-lock.json + npm install + npm install --save https://github.com/skiran86/mui-custom-datatables/tarball/master + cd ${WORKSPACE}/src/main/frontend/node_modules/mui-custom-datatables + npm install + npm run build + cd - + npm run-script build + ls -l . + #docker build -t cbiitssrepo/app -f ${WORKSPACE}/dockerfiles/frontend-dockerfile . + #docker push cbiitssrepo/app + """ + + //build image from + + // sh "mv target/ICDC-0.0.1.war target/ROOT.war" + } + } + } +// stage('Deploy') { +// when { +// expression { +// currentBuild.result == null || currentBuild.result == 'SUCCESS' +// } +// } +// steps { +// ansiColor('xterm') { +// withCredentials([sshUserPrivateKey(credentialsId: 'commonsdocker', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'deployer')]) { +// sh label: '', script: ''' + +// for server in $TOMCAT02_IP $TOMCAT01_IP; +// do +// pushd target +// scp -i $deployer_key -o StrictHostKeyChecking=no ROOT.war $deployer@${server}: +// popd +// ssh -i $deployer_key -T -o StrictHostKeyChecking=no $deployer@${server} << EOF +// docker cp ~/ROOT.war ctn:/usr/local/tomcat/webapps +// rm -rf ~/ROOT.war +// EOF +// done''' +// } +// } +// } +// } + // stage('schema'){ + // environment { + // NEO4J_IP = "${NEO4J_IP}" + // } + // steps { + // withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER')]){ + // sh "curl -X POST http://${NEO4J_IP}:7474/graphql/idl/ -H 'Accept: application/json' -H 'Authorization: ${BEARER}' -d @src/main/resources/graphql/icdc.graphql" + // } + // } + // } + } + post { + always { + sh "echo Testing" + //sendSlackMessage() + } + } +} diff --git a/jenkins/jobs/monitors/Jenkinsfile b/jenkins/jobs/monitors/Jenkinsfile new file mode 100644 index 
000000000..e751ef3fe --- /dev/null +++ b/jenkins/jobs/monitors/Jenkinsfile @@ -0,0 +1,130 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + slack_image = ":sparkles:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${slack_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + + parameters { + string( + defaultValue: '', + description: 'The project to search for', + name: 'Project') + string( + defaultValue: '', + description: 'The tier to search for', + name: 'Tier') + } + + options { + ansiColor('xterm') + } + + stages{ + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('set-environment'){ + steps { + script { + def proj = '' + if (params.Project != ''){ + proj = "-p ${params.Project}" + } + + def tier = '' + if (params.Tier != ''){ + tier = "-t ${params.Tier}" + } + + env.PY_VARS = "${proj} ${tier}" + } + } + } + + stage('Search Monitors'){ + steps { + script { + + sh label: 
'Env-Updates', script: '''#!/bin/bash + + yum -y install python3 curl + + cd monitoring + pip3 install -r requirements.txt + + ''' + + withCredentials([string(credentialsId: 'newrelic_api_key', variable: 'API_KEY'), + string(credentialsId: 'sumo_auth', variable: 'SUMO_AUTH')]) { + sh "python3 monitoring/monitor_query.py $PY_VARS -k $API_KEY -a \'$SUMO_AUTH\'" + } + + } + + } + } + + } + post { + +// always { +// +// sendSlackMessage() +// +// } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/neo4j-update/Jenkinsfile b/jenkins/jobs/neo4j-update/Jenkinsfile new file mode 100644 index 000000000..2c061d894 --- /dev/null +++ b/jenkins/jobs/neo4j-update/Jenkinsfile @@ -0,0 +1,163 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + + parameters { + + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to upgrade', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + + } + + options { + ansiColor('xterm') + } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + 
stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-d2224-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "community" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-q2251-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "community" + } + break + case "stage": + withCredentials([usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-s2261-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "community" + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-p2284-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "enterprise" + } + break + default: + withCredentials([usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-d2224-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "community" + } + break + } + } + } + } + + stage('DB Update'){ + environment { + ICDC_SLACK_URL = 
"${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_INSTANCE} >> ${WORKSPACE}/icdc-devops/ansible/hosts + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-version-update.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + neo4j_edition: "${NEO4J_EDITION}", + ], + credentialsId: 'commonsdocker', + colorized: true) + + } + } + } + + } + post { + always { + sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/neo4j-update/Jenkinsfile_bento b/jenkins/jobs/neo4j-update/Jenkinsfile_bento new file mode 100644 index 000000000..9b2fead80 --- /dev/null +++ b/jenkins/jobs/neo4j-update/Jenkinsfile_bento @@ -0,0 +1,173 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'data-loader' + } + } + + parameters { + + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to 
upgrade', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,perf,test,icdc,ctdc,prod' ) + + } + + options { + ansiColor('xterm') + } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_DEV_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'qa_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_QA_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + case "perf": + withCredentials([usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_PERF_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + case "test": + withCredentials([usernamePassword(credentialsId: 'perf_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_TEST_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + case "icdc": + withCredentials([usernamePassword(credentialsId: 'icdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_ICDC_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + case "ctdc": + 
withCredentials([usernamePassword(credentialsId: 'ctdc_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_CTDC_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + case "prod": + withCredentials([usernamePassword(credentialsId: 'prod_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_PROD_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + default: + withCredentials([usernamePassword(credentialsId: 'dev_neo4j_user', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "${NEO4J_DEV_IP}" + env.NEO4J_PASS = "${NEO4J_PASS}" + } + break + } + } + } + } + + stage('DB Update'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_INSTANCE} >> ${WORKSPACE}/icdc-devops/ansible/hosts + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-version-update.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + colorized: true) + + } + } + } + + } + post { + always { + sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/neo4j-update/Jenkinsfile_ctdc b/jenkins/jobs/neo4j-update/Jenkinsfile_ctdc new file mode 100644 index 000000000..da05c4b20 --- /dev/null +++ b/jenkins/jobs/neo4j-update/Jenkinsfile_ctdc @@ -0,0 +1,149 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ 
+ title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${ICDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'commons-docker-ncias-p2236-v' + } + } + + parameters { + + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to upgrade', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa' ) + + } + + options { + ansiColor('xterm') + } + + tools { + maven 'Default' + jdk 'Default' + } + + stages{ + stage('checkout'){ + steps { + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-d2267-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "community" + } + break + case "qa": + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_qa_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncidb-q325-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + 
env.NEO4J_EDITION = "community" + } + break + default: + withCredentials([usernamePassword(credentialsId: 'neo4j_ctdc_dev_cred', passwordVariable: 'NEO4J_PASS', usernameVariable: 'NEO4J_USER')]) { + env.DB_INSTANCE = "ncias-d2267-c" + env.NEO4J_PASS = "${NEO4J_PASS}" + env.NEO4J_EDITION = "community" + } + break + } + } + } + } + + stage('DB Update'){ + environment { + ICDC_SLACK_URL = "${ICDC_SLACK_URL}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + + script { + sh label: 'database-hosts', script: '''#!/bin/bash + echo "Creating inventory file" + echo "[neo4j]" > ${WORKSPACE}/icdc-devops/ansible/hosts + echo ${DB_INSTANCE} >> ${WORKSPACE}/icdc-devops/ansible/hosts + ''' + + } + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/neo4j-version-update.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + neo4j_edition: "${NEO4J_EDITION}", + ], + credentialsId: 'commonsdocker', + colorized: true) + + } + } + } + + } + post { + always { + sendSlackMessage() + cleanWs() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/open-target-backend/Jenkinsfile b/jenkins/jobs/open-target-backend/Jenkinsfile new file mode 100644 index 000000000..773b979c3 --- /dev/null +++ b/jenkins/jobs/open-target-backend/Jenkinsfile @@ -0,0 +1,234 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: 
["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${PPDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_scala' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Backend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/ppdc-otp-backend.git') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ppdc-otp-backend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/ppdc-otp-backend.git']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.ELASTICSEARCH_HOST = "${OTP_ELASTICSEARCH_DEV_IP}" + env.SLICK_CLICKHOUSE_URL = "${OTP_CLICKHOUSE_DEV_IP}" + env.PLAY_PORT = 
"${PLAY_PORT}" + env.NEW_RELIC_APP_NAME = "PPDC_AWS_BACKEND_DEV" + env.NEW_RELIC_LICENSE_KEY="${NEW_RELIC_LICENSE_KEY}" + env.NEW_RELIC_HOST = "gov-collector.newrelic.com" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.ELASTICSEARCH_HOST = "${OTP_ELASTICSEARCH_QA_IP}" + env.SLICK_CLICKHOUSE_URL = "${OTP_CLICKHOUSE_QA_IP}" + env.PLAY_PORT = "${PLAY_PORT}" + env.NEW_RELIC_APP_NAME = "PPDC_AWS_BACKEND_QA" + env.NEW_RELIC_LICENSE_KEY="${NEW_RELIC_LICENSE_KEY}" + env.NEW_RELIC_HOST = "gov-collector.newrelic.com" + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "stage": + withCredentials([string(credentialsId: 'perf_bearer', variable: 'BEARER')]) { + env.ELASTICSEARCH_HOST = "${OTP_ELASTICSEARCH_STAGE_IP}" + env.SLICK_CLICKHOUSE_URL = "${OTP_CLICKHOUSE_STAGE_IP}" + env.PLAY_PORT = "${PLAY_PORT}" + env.NEW_RELIC_APP_NAME = "PPDC_AWS_BACKEND_STAGE" + env.NEW_RELIC_LICENSE_KEY="${NEW_RELIC_LICENSE_KEY}" + env.NEW_RELIC_HOST = "gov-collector.newrelic.com" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + } + } + } + } + stage('build'){ + environment { + BE_VERSION = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-open-target-backend.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}" + ], + 
hostKeyChecking: false, + colorized: true) + } + } + } + } + stage('deploy'){ + agent { + node { + label 'bastion-host' + } + } + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + TIER = "${params.Environment}" + BE_VERSION = "${params.Backend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ppdc_ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/ansible/deploy-open-target-backend.yml', + inventory: '${WORKSPACE}/ansible/hosts', + credentialsId: 'server_ssh_key', + extraVars: [ + tier: "${params.Environment}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + + } +} \ No newline at end of file diff --git a/jenkins/jobs/open-target-backend/Jenkinsfile_deploy b/jenkins/jobs/open-target-backend/Jenkinsfile_deploy new file mode 100644 index 000000000..7c4bf07df --- /dev/null +++ b/jenkins/jobs/open-target-backend/Jenkinsfile_deploy @@ -0,0 +1,374 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl 
-X POST -H 'Content-type: application/json' --data '${slack}' '${PPDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_scala' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. +// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should math the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' + +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + VERSION = "${params.Frontend_Image}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + case "qa": + 
withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "perf": + withCredentials([string(credentialsId: 'perf_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.BEARER = "${BEARER}" + env.TIER = "perf" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + case "icdc": + withCredentials([string(credentialsId: 'icdc_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.BEARER = "${BEARER}" + env.TIER = "icdc" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + } + } + } + } + stage('deploy'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Image}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp 
${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: '/var/lib/neo4j/logs/*.log', + ], + colorized: true) + } + } + } + } + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} diff --git a/jenkins/jobs/ppdc-otg/Jenkinsfile b/jenkins/jobs/ppdc-otg/Jenkinsfile new file mode 100644 index 000000000..17b4231d4 --- /dev/null +++ b/jenkins/jobs/ppdc-otg/Jenkinsfile @@ -0,0 +1,219 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def 
BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${PPDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_ppdc' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/ppdc-otg-frontend.git') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ppdc-otg-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/ppdc-otg-frontend.git']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + 
relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "perf": + withCredentials([string(credentialsId: 'perf_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "perf" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + } + } + } + } + stage('build'){ + environment { + VERSION = "${params.Branch_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-ppdc-otg.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + frontend_version: "${params.Frontend_Tag}" + ], + hostKeyChecking: false, + colorized: true) + } + } + } + } + stage('deploy'){ + agent { + node { + label 'bastion-host' + } + } + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + 
withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ppdc_ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/ansible/deploy-ppdc-otg.yml', + inventory: '${WORKSPACE}/ansible/hosts', + credentialsId: 'server_ssh_key', + extraVars: [ + tier: "${params.Environment}", + frontend_version: "${params.Frontend_Tag}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + + } +} diff --git a/jenkins/jobs/ppdc-otg/Jenkinsfile_deploy b/jenkins/jobs/ppdc-otg/Jenkinsfile_deploy new file mode 100644 index 000000000..48ba5d5a1 --- /dev/null +++ b/jenkins/jobs/ppdc-otg/Jenkinsfile_deploy @@ -0,0 +1,374 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${PPDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_ppdc' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + 
value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. 
+// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' + +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + VERSION = "${params.Frontend_Image}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + case "qa": + 
withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "perf": + withCredentials([string(credentialsId: 'perf_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.BEARER = "${BEARER}" + env.TIER = "perf" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + case "icdc": + withCredentials([string(credentialsId: 'icdc_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_ICDC_IP}" + env.BEARER = "${BEARER}" + env.TIER = "icdc" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + } + } + } + } + stage('deploy'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Image}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp 
${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: '/var/lib/neo4j/logs/*.log', + ], + colorized: true) + } + } + } + } + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} diff --git a/jenkins/jobs/ppdc-otp/Jenkinsfile b/jenkins/jobs/ppdc-otp/Jenkinsfile new file mode 100644 index 000000000..c846545e2 --- /dev/null +++ b/jenkins/jobs/ppdc-otp/Jenkinsfile @@ -0,0 +1,218 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def 
BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${PPDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_ppdc' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod,demo' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/ppdc-otp-frontend.git') + + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'ppdc-otp-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/ppdc-otp-frontend.git']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + 
relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + /*stage('Set Environment'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + VERSION = "${params.Frontend_Tag}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "stage" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + } + } + } + }*/ + stage('build'){ + environment { + VERSION = "${params.Branch_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/build-ppdc-otp.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + tier: "${params.Environment}", + frontend_version: "${params.Frontend_Tag}" + ], + hostKeyChecking: false, + colorized: true) + } + } + } + } + stage('deploy'){ + agent { + node { + label 'bastion-host' + } + + } + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Tag}" + } + + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + 
withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER'), + file(credentialsId: 'ppdc_ansible_host_file', variable: 'host_file'), + ]){ + sh "cp ${host_file} ${WORKSPACE}/ansible/hosts" + ansiblePlaybook( + playbook: '${WORKSPACE}/ansible/deploy-ppdc-otp.yml', + inventory: '${WORKSPACE}/ansible/hosts', + credentialsId: 'server_ssh_key', + extraVars: [ + tier: "${params.Environment}", + frontend_version: "${params.Frontend_Tag}" + ], + become: true, + hostKeyChecking: false, + colorized: true) + } + } + } + } + } + post { + + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + + } +} diff --git a/jenkins/jobs/ppdc-otp/Jenkinsfile_deploy b/jenkins/jobs/ppdc-otp/Jenkinsfile_deploy new file mode 100644 index 000000000..3a576b6b2 --- /dev/null +++ b/jenkins/jobs/ppdc-otp/Jenkinsfile_deploy @@ -0,0 +1,360 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${PPDC_SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_ppdc' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + 
value: 'sandbox,dev,test,qa,perf,icdc,prod' ) + extendedChoice( + name: 'Region', + defaultValue: 'us-east-1', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'us-east-1,us-west-1' ) + booleanParam( + defaultValue: false, + description: 'Flush the Redis cache', + name: 'Flush_Redis') + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'integration', +// name: 'Frontend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-frontend') +// +// The Frontend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Environment and use the following values: +// +// Script: +// env_value = Environment +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-frontend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// if(env_value.equalsIgnoreCase("dev")) { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } else { +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("master")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['integration'] +// +// NOTE: this sets the default frontend branch to "integration" and is required for builds triggered from SCM polling. 
+// +// Referenced parameters: +// Environment + +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Image', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Image parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. +// The GUI definition for this parameter should follow Frontend_Image and use the following values: +// +// Script: +// front_tag = Frontend_Image +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase("Integration")) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. 
The branch defined here should match the default for Frontend_Image +// +// Referenced parameters: +// Frontend_Image + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { +// git branch: "${params.Backend_Image}", +// url: 'https://github.com/CBIIT/bento-backend' + +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Backend_Image}"]], +// doGenerateSubmoduleConfigurations: +// false, extensions: [], submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-backend']]]) + +// dir('bento-frontend'){ +// git branch: "${params.Frontend_Image}", +// url: 'https://github.com/CBIIT/bento-frontend' +// } + +// checkout([$class: 'GitSCM', +// branches: [[name: "${params.Frontend_Image}"]], +// doGenerateSubmoduleConfigurations: false, +// extensions: [[$class: 'RelativeTargetDirectory', +// relativeTargetDir: 'bento-frontend']], +// submoduleCfg: [], +// userRemoteConfigs: +// [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + } + + } + stage('Set Environment'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + VERSION = "${params.Frontend_Image}" + } + steps { + + script { + currentBuild.displayName = "Tag: ${VERSION} Environment: ${params.Environment}" + } + + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', 
variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "qa" + + + + } + break + case "perf": + withCredentials([string(credentialsId: 'perf_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PERF_IP}" + env.BEARER = "${BEARER}" + env.TIER = "perf" + + + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "prod" + + + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.BEARER = "${BEARER}" + env.TIER = "dev" + + + + } + break + } + } + } + } + stage('deploy'){ + environment { + PPDC_SLACK_URL = "${PPDC_SLACK_URL}" + TIER = "${params.Environment}" + VERSION = "${params.Frontend_Image}" + FE_VERSION = "${params.Frontend_Image}" + BE_VERSION = "${params.Backend_Image}" + } + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + stage('update database monitoring agents'){ + steps{ + node('data-loader') { + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + echo "Recreating inventory file" + sh "cp ${WORKSPACE}/icdc-devops/ansible/hosts ${WORKSPACE}/icdc-devops/ansible/hosts.bak" + sh "echo [agent_setup] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo ${NEO4J_IP} ansible_ssh_private_key_file=/home/bento/.ssh/devops ansible_ssh_user=bento >> ${WORKSPACE}/icdc-devops/ansible/hosts" + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { 
+ ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/agent-setup.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + extraVars: [ + env: "${params.Environment}", + app_type: 'database', + app_name: 'neo4j', + project: 'bento', + log_path: '/var/lib/neo4j/logs/*.log', + ], + colorized: true) + } + } + } + } + stage('clear redis cache'){ + agent { label 'data-loader' } + when { + expression { params.Flush_Redis } + } + + environment { + TIER = "${params.Environment}" + } + + steps{ + checkout( changelog:false, + poll: false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']] + ]) + + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + + } + + cleanup { + + cleanWs() + + } + } +} diff --git a/jenkins/jobs/qbranch/Jenkinsfile b/jenkins/jobs/qbranch/Jenkinsfile new file mode 100644 index 000000000..131cedde7 --- /dev/null +++ b/jenkins/jobs/qbranch/Jenkinsfile @@ -0,0 +1,88 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento 
Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'cicd_microservice' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_MULTI_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + } + stage('TestingDocker'){ + steps{ + script{ + docker.image('cbiitssrepo/ccdc-etl').inside{ + sh 'ls -l' + } + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} \ No newline at end of file diff --git a/jenkins/jobs/qbranch/JenkinsfileQbranch b/jenkins/jobs/qbranch/JenkinsfileQbranch new file mode 100644 index 000000000..3f2ed51ac --- /dev/null +++ b/jenkins/jobs/qbranch/JenkinsfileQbranch @@ -0,0 +1,216 @@ + +import groovy.json.JsonOutput + +def sendSlackMessage() { + jenkins_image = ":jenkins:" + beer_image = ":beer:" + long epoch = System.currentTimeMillis()/1000 + def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] + + def slack = JsonOutput.toJson( + [ + icon_emoji: jenkins_image, + attachments: [[ + title: "Jenkins Job Alert - ${currentBuild.currentResult}", + text: 
"Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", + fallback: "Bento Jenkins Build", + color: "${BUILD_COLORS[currentBuild.currentResult]}", + footer: "bento devops", + ts: epoch, + mrkdwn_in: ["footer", "title"], + ]] + ] + ) + try { + sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" + } catch (err) { + echo "${err} Slack notify failed" + } +} + + +pipeline { + agent { + node { + label 'slave-ncias-d2320-c' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'sandbox,dev,test,qa,stage,prod' ) + gitParameter(branchFilter: 'origin/(.*)', + defaultValue: 'integration', + name: 'Frontend_Tag', + type: 'PT_BRANCH_TAG', + quickFilterEnabled: false, + selectedValue: 'DEFAULT', + sortMode: 'ASCENDING_SMART', + tagFilter: '*', + useRepository: 'https://github.com/CBIIT/bento-frontend') +// gitParameter(branchFilter: 'origin/(.*)', +// defaultValue: 'Integration', +// name: 'Backend_Tag', +// type: 'PT_BRANCH_TAG', +// quickFilterEnabled: false, +// selectedValue: 'DEFAULT', +// sortMode: 'ASCENDING_SMART', +// tagFilter: '*', +// useRepository: 'https://github.com/CBIIT/bento-backend') +// +// The Backend_Tag parameter has been replaced in this job by a GUI parameter due to the fact that type "active choice reactive parameter" is not supported. 
+// The GUI definition for this parameter should follow Frontend_Tag and use the following values: +// +// Script: +// front_tag = Frontend_Tag +// def gettags = ("git ls-remote -h -t https://github.com/CBIIT/bento-backend.git").execute() +// def tags = gettags.text.readLines().collect { it.split()[1].replaceAll('refs/heads/', '').replaceAll('refs/tags/', '').replaceAll("\\^\\{\\}", '')} +// List tagnames = tags.collect{ '' + it + '' } +// +// for(int i=0; i < tagnames.size(); i++) { +// if(tagnames.get(i).equalsIgnoreCase(front_tag)) { +// new_value = tagnames[i] + ':selected' +// tagnames.set(i, new_value); +// } +// } +// +// return tagnames +// +// Fallback Script: +// return ['Integration'] +// +// NOTE: this sets the default backend branch to "Integration" and is required for builds triggered from SCM polling. The branch defined here should math the default for Frontend_Tag +// +// Referenced parameters: +// Frontend_Tag + + } + options { + ansiColor('xterm') + } + tools { + maven 'Default' + jdk 'Default' + } + stages{ + stage('checkout'){ + steps { + checkout([$class: 'GitSCM', + branches: [[name: "${params.Backend_Tag}"]], + doGenerateSubmoduleConfigurations: + false, extensions: [], submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-backend']]]) + + checkout([$class: 'GitSCM', + branches: [[name: "${params.Frontend_Tag}"]], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'RelativeTargetDirectory', + relativeTargetDir: 'bento-frontend']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/bento-frontend']]]) + + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 
'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + stage('Set Environment'){ + environment { + SLACK_URL = "${SLACK_URL}" + } + steps { + script { + switch("${params.Environment}") { + case "dev": + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + case "qa": + withCredentials([string(credentialsId: 'qa_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_QA_IP}" + env.BEARER = "${BEARER}" + env.TIER = "qa" + + } + break + case "stage": + withCredentials([string(credentialsId: 'stage_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_STAGE_IP}" + env.BEARER = "${BEARER}" + env.TIER = "stage" + + } + break + case "prod": + withCredentials([string(credentialsId: 'prod_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_PROD_IP}" + env.BEARER = "${BEARER}" + env.TIER = "prod" + + } + break + default: + withCredentials([string(credentialsId: 'dev_bearer', variable: 'BEARER')]) { + env.NEO4J_IP = "${NEO4J_DEV_IP}" + env.BEARER = "${BEARER}" + env.TIER = "dev" + + } + break + } + } + } + } + stage('build'){ + environment { + TAG = "${params.Backend_Tag}" + } + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([usernamePassword(credentialsId: 'docker-login', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USER')]){ + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-build.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + stage('deploy'){ + steps{ + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/bento-deploy.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + colorized: true) + } + } + } + } + post { + always { + + sendSlackMessage() + } + } +} \ No newline at end of file diff --git 
a/jenkins/jobs/redis-intergration/Jenkinsfile b/jenkins/jobs/redis-intergration/Jenkinsfile new file mode 100644 index 000000000..641197fc3 --- /dev/null +++ b/jenkins/jobs/redis-intergration/Jenkinsfile @@ -0,0 +1,85 @@ + +pipeline { + agent { + node { + // label 'docker-maven' + label 'commons-docker-ncias-p2236-v' + } + } + parameters { + extendedChoice( + name: 'Environment', + defaultValue: 'dev', + description: 'Choose the environment to build', + type: 'PT_SINGLE_SELECT', + value: 'dev,qa,stage,prod' ) + extendedChoice( + name: 'ProjectName', + defaultValue: 'icdc', + description: 'Choose aws region to build', + type: 'PT_SINGLE_SELECT', + value: 'icdc,bento' ) + string( + defaultValue: '127.0.0.1', + description: 'IP or Hostname of the redis server', + name: 'Hostname') + } + options { + ansiColor('xterm') + } + + stages{ + stage('checkout'){ + steps { + checkout( poll: false, + changelog:false, + scm: [$class: 'GitSCM', + branches: [[name: '*/master']], + doGenerateSubmoduleConfigurations: false, + extensions: [[$class: 'DisableRemotePoll'], + [$class: 'PathRestriction', excludedRegions: '*'], + [$class: 'RelativeTargetDirectory', + relativeTargetDir: 'icdc-devops']], + submoduleCfg: [], + userRemoteConfigs: + [[url: 'https://github.com/CBIIT/icdc-devops.git']]]) + + } + + } + + stage('check status'){ + steps { + wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) { + withCredentials([string(credentialsId: 'newrelic_license_key', variable: 'NEWRELIC_LIC_KEY')]) + { + sh "echo -e [${params.Environment}] >> ${WORKSPACE}/icdc-devops/ansible/hosts" + sh "echo -e ${params.Hostname} >> ${WORKSPACE}/icdc-devops/ansible/hosts" + + ansiblePlaybook( + playbook: '${WORKSPACE}/icdc-devops/ansible/redis-intergration.yml', + inventory: '${WORKSPACE}/icdc-devops/ansible/hosts', + credentialsId: 'commonsdocker', + hostKeyChecking: false, + extraVars: [ + env: "${params.Environment}", + newrelic_license_key: "${NEWRELIC_LIC_KEY}", + project: 
"${params.ProjectName}" + ], + colorized: true + ) + } + } + } + } + } + post { + + cleanup { + + cleanWs() + + } + + } +} diff --git a/jenkins/plugins.txt b/jenkins/plugins.txt new file mode 100644 index 000000000..fd0f90eee --- /dev/null +++ b/jenkins/plugins.txt @@ -0,0 +1,122 @@ +ace-editor +analysis-core +analysis-model-api +ansible +ansible-tower +ansicolor +ant +antisamy-markup-formatter +apache-httpcomponents-client-4-api +authentication-tokens +bouncycastle-api +branch-api +build-environment +build-timeout +build-with-parameters +cloudbees-folder +command-launcher +conditional-buildstep +configuration-as-code +configuration-as-code-groovy +copyartifact +credentials +credentials-binding +deploy +description-setter +display-url-api +docker-commons +docker-java-api +docker-plugin +docker-workflow +durable-task +email-ext +envinject +envinject-api +extended-choice-parameter +extensible-choice-parameter +filesystem-list-parameter-plugin +git +git-client +git-parameter +git-server +github +github-api +github-branch-source +github-oauth +gradle +handlebars +htmlpublisher +jackson2-api +javadoc +jdk-tool +job-dsl +jquery +jquery-detached +jquery-ui +jsch +junit +ldap +list-git-branches-parameter +lockable-resources +mailer +mapdb-api +matrix-auth +matrix-combinations-parameter +matrix-project +maven-plugin +momentjs +multiple-scms +pam-auth +Parameterized-Remote-Trigger +parameterized-scheduler +parameterized-trigger +pipeline-build-step +pipeline-github-lib +pipeline-graph-analysis +pipeline-input-step +pipeline-milestone-step +pipeline-model-api +pipeline-model-declarative-agent +pipeline-model-definition +pipeline-model-extensions +pipeline-rest-api +pipeline-stage-step +pipeline-stage-tags-metadata +pipeline-stage-view +plain-credentials +preSCMbuildstep +pretested-integration +purge-job-history +rebuild +resource-disposer +role-strategy +run-condition +scm-api +script-security +seed +slack +ssh-credentials +ssh-slaves +structs +subversion +text-finder 
+timestamper +token-macro +trilead-api +uno-choice +variant +warnings +warnings-ng +windows-slaves +workflow-aggregator +workflow-api +workflow-basic-steps +workflow-cps +workflow-cps-global-lib +workflow-durable-task-step +workflow-job +workflow-multibranch +workflow-scm-step +workflow-step-api +workflow-support +ws-cleanup \ No newline at end of file diff --git a/jobs/icdc/Jenkinsfile b/jobs/icdc/Jenkinsfile deleted file mode 100644 index fce181872..000000000 --- a/jobs/icdc/Jenkinsfile +++ /dev/null @@ -1,111 +0,0 @@ -//load shared library for slack notification -//@Library('shared-library')_ - -import groovy.json.JsonOutput - -def sendSlackMessage() { - jenkins_image = ":jenkins:" - beer_image = ":beer:" - long epoch = System.currentTimeMillis()/1000 - def BUILD_COLORS = ['SUCCESS': 'good', 'FAILURE': 'danger', 'UNSTABLE': 'danger', 'ABORTED': 'danger'] - - def slack = JsonOutput.toJson( - [ - icon_emoji: jenkins_image, - attachments: [[ - title: "Jenkins Job Alert - ${currentBuild.currentResult}", - text: "Job ${env.JOB_NAME} build ${env.BUILD_NUMBER} ${beer_image}\n Details at: ${env.BUILD_URL}console", - fallback: "ICDC Jenkins Build", - color: "${BUILD_COLORS[currentBuild.currentResult]}", - footer: "icdc devops", - ts: epoch, - mrkdwn_in: ["footer", "title"], - ]] - ] - ) - try { - sh "curl -X POST -H 'Content-type: application/json' --data '${slack}' '${SLACK_URL}'" - } catch (err) { - echo "${err} Slack notify failed" - } -} - -pipeline { - - agent { - node { - label 'icdc_maven' - } - } - parameters { - listGitBranches(branchFilter: '.*', credentialsId: '', defaultValue: 'master', name: 'Tag', quickFilterEnabled: false, remoteURL: 'https://github.com/CBIIT/icdc-codebase', selectedValue: 'DEFAULT', sortMode: 'NONE', tagFilter: '.*', type: 'PT_BRANCH_TAG') - } - triggers { - pollSCM('H/15 * * * 1-5') - cron('5 * * * 1-5') - } - options { - timestamps() - } - tools { - maven 'maven-3.6.1' - jdk 'jdk11' - } - stages { - stage('Checkout') { - steps { - 
checkout([$class: 'GitSCM', branches: [[name: "${params.Tag}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/CBIIT/icdc-codebase']]]) - } - } - stage('Build') { - environment { - TOMCAT01_IP = "${TOMCAT01_IP}" - TOMCAT02_IP = "${TOMCAT02_IP}" - SLACK_URL = "${SLACK_URL}" - NEO4J_IP = "${NEO4J_IP}" - //NEO4J_IP = "neo4j.essential-dev.com" - } - steps { - withCredentials([string(credentialsId: 'authorization_bearer', variable: 'BEARER'), usernamePassword(credentialsId: 'neo4j_user', passwordVariable: 'NEO4J_PASSWORD', usernameVariable: 'NEO4J_USER')]) { - sh "cd ${WORKSPACE}/src/main/resources/ && mv application_example.properties application.properties" - sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/localhost/${NEO4J_IP}/g' application.properties" - sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/Basic 123456/${BEARER}/' application.properties" - sh "cd ${WORKSPACE}/src/main/resources && sed -i 's/password=${NEO4J_IP}/password=${NEO4J_PASSWORD}/' application.properties" - sh "mvn package -DskipTests" - sh "mv target/ICDC-0.0.1.war target/ICDC.war" - } - } - } - stage('Deploy') { - when { - expression { - currentBuild.result == null || currentBuild.result == 'SUCCESS' - } - } - steps { - ansiColor('xterm') { - withCredentials([sshUserPrivateKey(credentialsId: 'deployer_ssh_key', keyFileVariable: 'deployer_key', passphraseVariable: '', usernameVariable: 'jenkins')]) { -sh label: '', script: ''' - -for server in $TOMCAT02_IP $TOMCAT01_IP; -do -pushd target -scp -i $deployer_key -o StrictHostKeyChecking=no ICDC.war jenkins@${server}:/local/deployments -popd -ssh -i $deployer_key -T -o StrictHostKeyChecking=no jenkins@${server} << EOF -cd /local/deployments -sudo docker cp ICDC.war k9dc:/usr/local/tomcat/webapps -EOF -done''' - } - } - } - } - } - post { - always { - - sendSlackMessage() - } - } -} \ No newline at end of file diff --git a/kubernetes/deployments/dev/bento-backend.yml 
b/kubernetes/deployments/dev/bento-backend.yml new file mode 100644 index 000000000..ba09a8dbe --- /dev/null +++ b/kubernetes/deployments/dev/bento-backend.yml @@ -0,0 +1,34 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: bento-backend +spec: + replicas: 1 + selector: + matchLabels: + app: bento + role: backend + env: dev + template: + metadata: + name: bento-backend + labels: + app: bento + role: backend + env: dev + spec: + containers: + - name: backend + image: cbiitssrepo/bento-backend:17 + resources: + limits: + memory: "500Mi" + cpu: "100m" + imagePullPolicy: Always + readinessProbe: + httpGet: + path: /ping + port: 8080 + ports: + - name: backend + containerPort: 8080 \ No newline at end of file diff --git a/kubernetes/deployments/dev/bento-frontend.yml b/kubernetes/deployments/dev/bento-frontend.yml new file mode 100644 index 000000000..dcd0fd159 --- /dev/null +++ b/kubernetes/deployments/dev/bento-frontend.yml @@ -0,0 +1,34 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: bento-frontend +spec: + replicas: + selector: + matchLabels: + app: bento + role: frontend + env: dev + template: + metadata: + name: bento-frontend + labels: + app: bento + role: frontend + env: dev + spec: + containers: + - name: frontend + image: cbiitssrepo/bento-frontend:17 + resources: + limits: + memory: "500Mi" + cpu: "100m" + imagePullPolicy: Always + readinessProbe: + httpGet: + path: / + port: 80 + ports: + - name: frontend + containerPort: 80 \ No newline at end of file diff --git a/kubernetes/services/dev/backend.yml b/kubernetes/services/dev/backend.yml new file mode 100644 index 000000000..ff53a1c32 --- /dev/null +++ b/kubernetes/services/dev/backend.yml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: bento-backend +spec: + ports: + - name: http + port: 8080 + targetPort: 8080 + protocol: TCP + selector: + role: backend + app: bento \ No newline at end of file diff --git a/kubernetes/services/dev/frontend.yml 
b/kubernetes/services/dev/frontend.yml new file mode 100644 index 000000000..33df1722a --- /dev/null +++ b/kubernetes/services/dev/frontend.yml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: bento-frontend +spec: + type: LoadBalancer + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + selector: + app: bento + role: frontend \ No newline at end of file diff --git a/monitoring/.gitignore b/monitoring/.gitignore new file mode 100644 index 000000000..19b412656 --- /dev/null +++ b/monitoring/.gitignore @@ -0,0 +1,3 @@ +dist/ +*.pyo +*.pyc \ No newline at end of file diff --git a/monitoring/dashboards/add_tier_dashboards.py b/monitoring/dashboards/add_tier_dashboards.py new file mode 100644 index 000000000..27ae0ef51 --- /dev/null +++ b/monitoring/dashboards/add_tier_dashboards.py @@ -0,0 +1,38 @@ +#!/usr/bin/python + +import sys, getopt +import set_overview + +def main(argv): + + global project + project = '' + global tier + tier = '' + global key + key = '' + + try: + opts, args = getopt.getopt(argv,"hp:t:v:k:",["project=","tier=","key="]) + except getopt.GetoptError: + print('add_tier_dashboards.py -p -t -k ') + sys.exit(2) + for opt, arg in opts: + if opt == '-h': + print('monitor_query.py -p -t -k ') + sys.exit() + elif opt in ("-p", "--project"): + project = arg + elif opt in ("-t", "--tier"): + tier = arg + elif opt in ("-k", "--key"): + key = arg + +if __name__ == "__main__": + main(sys.argv[1:]) + + print() + print('Adding Overview Dashboard For: {} {}'.format(project, tier)) + print() + + set_overview.setoverviewdashboard(project, tier, key) \ No newline at end of file diff --git a/monitoring/dashboards/set_overview.py b/monitoring/dashboards/set_overview.py new file mode 100644 index 000000000..42fb9a58e --- /dev/null +++ b/monitoring/dashboards/set_overview.py @@ -0,0 +1,93 @@ +#!/usr/bin/python + +import json +import requests +import re +from widgets.markdown import markdown_test +from widgets.apm_errors import 
apmErrorGraph +from widgets.apm_apdex import apmApdex +from widgets.db_cpu_usage import dbCPUUsage +from widgets.db_mem_usage import dbMemUsage +from widgets.db_disk_usage import dbDiskUsage + +def setoverviewdashboard(project, tier, key): + API_ENDPOINT = 'https://api.newrelic.com/graphql' + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + def getDashboard(): + # search for existing dashboards with the same name + data = {'query':'{actor {' + 'entitySearch(query: "name = \'' + project.title() + ' Overview: ' + tier.lower() + '\' AND type IN (\'DASHBOARD\')") {' + 'results {' + 'entities {' + 'guid,' + '}' + '}' + '}' + '}}'} + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + return(re.search('"guid":"(.*)"', response.text)) + + # Set Dashboard Data - this is the data that will be added to the overview dashboard + # markdown widget + markdownWidget = markdown_test(project, tier) + + # APM errors + apmErrorWidget = apmErrorGraph(project, tier, key) + # APM apdex + apmApdexWidget = apmApdex(project, tier, key) + + # DB CPU Usage + dbCPUUsageWidget = dbCPUUsage(project, tier) + # DB Memeory Usage + dbMemUsageWidget = dbMemUsage(project, tier) + # DB Disk Usage + dbDiskUsageWidget = dbDiskUsage(project, tier) + + dash_data = ', dashboard: {'\ + 'name: "' + project.title() + ' Overview: ' + tier.lower() + '",'\ + 'permissions: PUBLIC_READ_ONLY,'\ + 'pages: {'\ + 'name: "page_1",'\ + 'widgets: ['\ + '' + markdownWidget + ','\ + '' + apmErrorWidget + ','\ + '' + apmApdexWidget + ','\ + '' + dbCPUUsageWidget + ','\ + '' + dbMemUsageWidget + ','\ + '' + dbDiskUsageWidget + ''\ + ']'\ + '}'\ + '}){'\ + 'errors {'\ + 'description,'\ + 'type'\ + '}'\ + '}'\ + '}' + + dash_guid = getDashboard() + + # set the query type for update or create + if dash_guid: + queryType = 'dashboardUpdate(guid: "' + dash_guid.group(1) 
+ '"' + pageGuid = dash_guid.group(1) + else: + queryType = 'dashboardCreate(accountId: 2292606' + + data = {'query':'mutation {' + queryType + dash_data} + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + print('Dashboard added: {} {} Overview'.format(project, tier)) \ No newline at end of file diff --git a/monitoring/dashboards/widgets/apm_apdex.py b/monitoring/dashboards/widgets/apm_apdex.py new file mode 100644 index 000000000..fb8f87fa9 --- /dev/null +++ b/monitoring/dashboards/widgets/apm_apdex.py @@ -0,0 +1,59 @@ +import json +import requests +import re + +def apmApdex(project, tier, key): + API_ENDPOINT = 'https://api.newrelic.com/graphql' + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + # search for APM app + if project.lower() == 'bento': + appName = project + '-aws-' + tier + '-backend' + else: + appName = project + '-cloudone-' + tier + '-backend' + + data = {'query':'{actor {' + 'entitySearch(query: "name = \'' + appName + '\' AND domain = \'APM\'") {' + 'results {' + 'entities {' + 'guid,' + '}' + '}' + '}' + '}}'} + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + apm_guid = re.search('"guid":"(.*)"', response.text) + if apm_guid: + #apm_guid.group(1) + + widgetContent = '{'\ + 'visualization: { id: "viz.line" },'\ + 'title: "APM Apdex Score",'\ + 'layout: {'\ + 'row: 1,'\ + 'column: 9,'\ + 'width: 4,'\ + 'height: 3'\ + '},'\ + 'rawConfiguration: {'\ + 'nrqlQueries: ['\ + '{'\ + 'accountId: 2292606,'\ + 'query: "SELECT apdex(apm.service.apdex) as \'App server\', apdex(apm.service.apdex.user) as \'End user\' FROM Metric WHERE (entity.guid = \'' + apm_guid.group(1) + '\') TIMESERIES auto"'\ + '}'\ + ']'\ + '}'\ + '}' + + return(widgetContent) + + 
else: + print('APM App not found: ' + appName) \ No newline at end of file diff --git a/monitoring/dashboards/widgets/apm_errors.py b/monitoring/dashboards/widgets/apm_errors.py new file mode 100644 index 000000000..ab3aecd5d --- /dev/null +++ b/monitoring/dashboards/widgets/apm_errors.py @@ -0,0 +1,63 @@ +import json +import requests +import re + +def apmErrorGraph(project, tier, key): + API_ENDPOINT = 'https://api.newrelic.com/graphql' + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + # search for APM app + if project.lower() == 'bento': + appName = project + '-aws-' + tier + '-backend' + else: + appName = project + '-cloudone-' + tier + '-backend' + + data = {'query':'{actor {' + 'entitySearch(query: "name = \'' + appName + '\' AND domain = \'APM\'") {' + 'results {' + 'entities {' + 'guid,' + '}' + '}' + '}' + '}}'} + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + apm_guid = re.search('"guid":"(.*)"', response.text) + if apm_guid: + #apm_guid.group(1) + + widgetContent = '{'\ + 'visualization: { id: "viz.area" },'\ + 'title: "APM Errors",'\ + 'layout: {'\ + 'row: 1,'\ + 'column: 5,'\ + 'width: 4,'\ + 'height: 3'\ + '},'\ + 'rawConfiguration: {'\ + 'nrqlQueries: ['\ + '{'\ + 'accountId: 2292606,'\ + 'query: "SELECT count(apm.service.error.count) / count(apm.service.transaction.duration) as \'Web errors\' FROM Metric WHERE (entity.guid = \'' + apm_guid.group(1) + '\') AND (transactionType = \'Web\') SINCE 1800 seconds AGO TIMESERIES"'\ + '},'\ + '{'\ + 'accountId: 2292606,'\ + 'query: "SELECT count(apm.service.error.count) / count(apm.service.transaction.duration) as \'All errors\' FROM Metric WHERE (entity.guid = \'' + apm_guid.group(1) + '\') SINCE 1800 seconds AGO TIMESERIES"'\ + '}'\ + ']'\ + '}'\ + '}' + + return(widgetContent) + + else: + print('APM App not found: ' + appName) \ No newline 
at end of file diff --git a/monitoring/dashboards/widgets/db_cpu_usage.py b/monitoring/dashboards/widgets/db_cpu_usage.py new file mode 100644 index 000000000..f632e72ff --- /dev/null +++ b/monitoring/dashboards/widgets/db_cpu_usage.py @@ -0,0 +1,20 @@ +def dbCPUUsage(project, tier): + # CPU Usage Stats + widgetContent = '{'\ + 'visualization: { id: "viz.line" },'\ + 'title: "Database CPU Usage (%)",'\ + 'layout: {'\ + 'row: 7,'\ + 'column: 1,'\ + 'width: 4,'\ + 'height: 3'\ + '},'\ + 'rawConfiguration: {'\ + 'nrqlQueries: [{'\ + 'accountId: 2292606,'\ + 'query: "SELECT average(cpuSystemPercent) AS \'System\', average(cpuIOWaitPercent) AS \'I/O wait\', average(cpuUserPercent) AS \'User\', average(cpuStealPercent) AS \'Steal\' FROM SystemSample WHERE label.Name IN (\'' + project + '-' + tier + '-neo4j-4\') TIMESERIES auto"'\ + '}]'\ + '}'\ + '}' + + return(widgetContent) \ No newline at end of file diff --git a/monitoring/dashboards/widgets/db_disk_usage.py b/monitoring/dashboards/widgets/db_disk_usage.py new file mode 100644 index 000000000..82d6ebb72 --- /dev/null +++ b/monitoring/dashboards/widgets/db_disk_usage.py @@ -0,0 +1,20 @@ +def dbDiskUsage(project, tier): + # Disk Usage Stats + widgetContent = '{'\ + 'visualization: { id: "viz.area" },'\ + 'title: "Database Instance Disk Usage (%)",'\ + 'layout: {'\ + 'row: 7,'\ + 'column: 9,'\ + 'width: 4,'\ + 'height: 3'\ + '},'\ + 'rawConfiguration: {'\ + 'nrqlQueries: [{'\ + 'accountId: 2292606,'\ + 'query: "SELECT average(diskUsedPercent) as \'Storage used %\' FROM StorageSample WHERE label.Name IN (\'' + project + '-' + tier + '-neo4j-4\') TIMESERIES auto"'\ + '}]'\ + '}'\ + '}' + + return(widgetContent) \ No newline at end of file diff --git a/monitoring/dashboards/widgets/db_mem_usage.py b/monitoring/dashboards/widgets/db_mem_usage.py new file mode 100644 index 000000000..66a904091 --- /dev/null +++ b/monitoring/dashboards/widgets/db_mem_usage.py @@ -0,0 +1,20 @@ +def dbMemUsage(project, tier): + # CPU Usage 
Stats + widgetContent = '{'\ + 'visualization: { id: "viz.area" },'\ + 'title: "Database Memory Usage (%)",'\ + 'layout: {'\ + 'row: 7,'\ + 'column: 5,'\ + 'width: 4,'\ + 'height: 3'\ + '},'\ + 'rawConfiguration: {'\ + 'nrqlQueries: [{'\ + 'accountId: 2292606,'\ + 'query: "SELECT average(memoryUsedPercent) AS \'Memory used %\' FROM SystemSample WHERE label.Name IN (\'' + project + '-' + tier + '-neo4j-4\') TIMESERIES auto"'\ + '}]'\ + '}'\ + '}' + + return(widgetContent) \ No newline at end of file diff --git a/monitoring/dashboards/widgets/markdown.py b/monitoring/dashboards/widgets/markdown.py new file mode 100644 index 000000000..7acaca28e --- /dev/null +++ b/monitoring/dashboards/widgets/markdown.py @@ -0,0 +1,17 @@ +def markdown_test(project, tier): + # markdown test text + widgetContent = '{'\ + 'visualization: { id: "viz.markdown" },'\ + 'title: "Overview Dashboard: ' + project + ' ' + tier + '",'\ + 'layout: {'\ + 'row: 1,'\ + 'column: 1,'\ + 'width: 4,'\ + 'height: 3'\ + '},'\ + 'rawConfiguration: {'\ + 'text: "This Dashboard provides an overview of the APM, DB Instance, and connected apps for this tier."'\ + '}'\ + '}' + + return(widgetContent) \ No newline at end of file diff --git a/monitoring/monitor_query.py b/monitoring/monitor_query.py new file mode 100644 index 000000000..1721a7561 --- /dev/null +++ b/monitoring/monitor_query.py @@ -0,0 +1,53 @@ +#!/usr/bin/python + +import sys, getopt +import json +import requests +import subprocess +from monitors.synthetics import get_url_monitor +from monitors.alerts.channels import get_alert_channels +from monitors.alerts.policies import get_alert_policies +from monitors.apm import get_apm_apps +from monitors.sumologic.collectors import get_sumo_collectors + +def main(argv): + global project + project = '' + global tier + tier = '' + global key + key = '' + global auth + auth = '' + try: + opts, args = getopt.getopt(argv,"hp:t:k:a:",["project=","tier=","key=","auth="]) + except getopt.GetoptError: + 
print('monitor_query.py -p -t -k -a ') + sys.exit(2) + for opt, arg in opts: + if opt == '-h': + print('monitor_query.py -p -t -k ') + sys.exit() + elif opt in ("-p", "--project"): + project = arg + elif opt in ("-t", "--tier"): + tier = arg + elif opt in ("-k", "--key"): + key = arg + elif opt in ("-a", "--auth"): + auth = arg + #print('Project is ', project) + #print('Tier is ', tier) + +if __name__ == "__main__": + main(sys.argv[1:]) + + print() + print('Monitor Information for: {} {}'.format(project, tier)) + print() + + get_url_monitor.geturlmonitor(project, tier, key) + get_alert_channels.getalertchannels(project, tier, key) + get_alert_policies.getalertpolicies(project, tier, key) + get_apm_apps.getapmapps(project, tier, key) + get_sumo_collectors.getsumocollectors(project, tier, auth) \ No newline at end of file diff --git a/monitoring/monitor_update.py b/monitoring/monitor_update.py new file mode 100644 index 000000000..118ee9ee4 --- /dev/null +++ b/monitoring/monitor_update.py @@ -0,0 +1,54 @@ +#!/usr/bin/python + +import sys, getopt +from monitors.alerts.channels import set_email_channel, set_slack_channel +from monitors.alerts.policies import set_url_policy, set_apm_policy, set_db_policy, set_aws_policy, set_redis_policy, set_nginx_policy +from monitors.synthetics import set_url_monitor + +def main(argv): + + global project + project = '' + global tier + tier = '' + global key + key = '' + global auth + auth = '' + try: + opts, args = getopt.getopt(argv,"hp:t:k:a:",["project=","tier=","key=","auth="]) + except getopt.GetoptError: + print('monitor_query.py -p -t -k -a ') + sys.exit(2) + for opt, arg in opts: + if opt == '-h': + print('monitor_query.py -p -t -k ') + sys.exit() + elif opt in ("-p", "--project"): + project = arg + elif opt in ("-t", "--tier"): + tier = arg + elif opt in ("-k", "--key"): + key = arg + elif opt in ("-a", "--auth"): + auth = arg + +if __name__ == "__main__": + main(sys.argv[1:]) + + print() + print('Adding Monitor 
def setalertemail(project, tier, key):
    """Ensure a New Relic e-mail notification channel exists for project/tier.

    Looks up an existing channel whose name contains
    '<Project>-<Tier> Email Alerts'; when none is found, creates one with
    recipients taken from the EMAIL environment variable.  Returns the
    channel id in either case.
    """
    API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_channels.json'
    DEVOPS_EMAIL = os.getenv('EMAIL')

    channel_name = '{}-{} Email Alerts'.format(project.title(), tier.title())

    # Fetch the existing channels and scan for one matching our name.
    try:
        listing = requests.get('{}'.format(API_ENDPOINT), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    channel_id = None
    channel_found = False
    for channel in listing.json()['channels']:
        if channel_name in channel.get("name", "none"):
            channel_found = True
            channel_id = channel.get('id')

    if channel_found:
        print("Channel {} already exists".format(channel_name))
        return(channel_id)

    # Not found: create the channel with a JSON payload.
    payload = {
        "channel": {
            "name": channel_name,
            "type": "Email",
            "configuration": {
                "recipients": DEVOPS_EMAIL,
                "include_json_attachment": True
            }
        }
    }
    create_headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }
    try:
        created = requests.post('{}'.format(API_ENDPOINT), headers=create_headers,
                                data=json.dumps(payload), allow_redirects=False)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
    print("Channel {} created".format(channel_name))
    return(created.json()['channels'][0].get('id'))
requests.exceptions.RequestException as e: + raise SystemExit(e) + + for x in response.json()['channels']: + if channel_name in x.get("name", "none"): + channel_found = True + channel_id = x.get('id') + + if not channel_found: + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + data = { + "channel": { + "name": channel_name, + "type": "Slack", + "configuration": { + "url": DEVOPS_SLACK_URL, + "channel": DEVOPS_SLACK_CHANNEL + } + } + } + + try: + response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + print("Channel {} created".format(channel_name)) + channel_id = response.json()['channels'][0].get('id') + else: + print("Channel {} already exists".format(channel_name)) + + return(channel_id) \ No newline at end of file diff --git a/monitoring/monitors/alerts/conditions/get_alert_conditions.py b/monitoring/monitors/alerts/conditions/get_alert_conditions.py new file mode 100644 index 000000000..c47400ac3 --- /dev/null +++ b/monitoring/monitors/alerts/conditions/get_alert_conditions.py @@ -0,0 +1,21 @@ +#!/usr/bin/python + +import json +import requests + +def getalertconditions(policy, key): + API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_conditions.json' + + headers = {'Api-Key': key} + data = {'policy_id': policy} + + try: + response = requests.get('{}'.format(API_ENDPOINT), headers=headers, data=data) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + if response.json()['conditions']: + print(' Alert Conditions:') + for x in response.json()['conditions']: + print(' - ' + x.get("name", "none")) + print() \ No newline at end of file diff --git a/monitoring/monitors/alerts/conditions/get_external_services.py b/monitoring/monitors/alerts/conditions/get_external_services.py new file mode 100644 index 000000000..af9f636eb --- /dev/null +++ 
b/monitoring/monitors/alerts/conditions/get_external_services.py @@ -0,0 +1,21 @@ +#!/usr/bin/python + +import json +import requests + +def getexternalservices(policy, key): + API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_external_service_conditions.json' + + headers = {'Api-Key': key} + data = {'policy_id': policy} + + try: + response = requests.get('{}'.format(API_ENDPOINT), headers=headers, data=data) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + if response.json()['external_service_conditions']: + print(' Alert NRQL Conditions:') + for x in response.json()['external_service_conditions']: + print(' - ' + x.get("name", "none")) + print() \ No newline at end of file diff --git a/monitoring/monitors/alerts/conditions/get_nrql_conditions.py b/monitoring/monitors/alerts/conditions/get_nrql_conditions.py new file mode 100644 index 000000000..33276b3c2 --- /dev/null +++ b/monitoring/monitors/alerts/conditions/get_nrql_conditions.py @@ -0,0 +1,21 @@ +#!/usr/bin/python + +import json +import requests + +def getnrqlconditions(policy, key): + API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_nrql_conditions.json' + + headers = {'Api-Key': key} + data = {'policy_id': policy} + + try: + response = requests.get('{}'.format(API_ENDPOINT), headers=headers, data=data) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + if response.json()['nrql_conditions']: + print(' Alert NRQL Conditions:') + for x in response.json()['nrql_conditions']: + print(' - ' + x.get("name", "none")) + print() \ No newline at end of file diff --git a/monitoring/monitors/alerts/conditions/get_synthetics_conditions.py b/monitoring/monitors/alerts/conditions/get_synthetics_conditions.py new file mode 100644 index 000000000..bdd01beef --- /dev/null +++ b/monitoring/monitors/alerts/conditions/get_synthetics_conditions.py @@ -0,0 +1,21 @@ +#!/usr/bin/python + +import json +import requests + +def getsyntheticsconditions(policy, key): + 
def setapdexcondition(project, tier, key, policy_id):
    """Create or update an APM Apdex alert condition for a project/tier.

    Collects the ids of all APM applications whose names contain both the
    project and tier strings, then attaches an 'apm_app_metric' Apdex
    condition (critical < 0.7, warning < 0.8) to ``policy_id`` — creating
    it when absent, updating it in place when a condition with the same
    name already exists.
    """
    # Collect the ids of the matching APM applications.
    headers = {'Api-Key': key}
    try:
        response = requests.get('https://api.newrelic.com/v2/applications.json', headers=headers)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    apm_id = []
    for x in response.json()['applications']:
        if project.lower() in x.get("name", "none").lower() and tier.lower() in x.get("name", "none").lower():
            apm_id.append(x.get("id", "none"))

    # Apdex condition payload.  Bug fix: the old code reused the ``data``
    # variable for the lookup query below, clobbering this payload before
    # it was ever sent — the POST/PUT shipped {'policy_id': ...} instead.
    condition_name = '{}-{} APM Apdex'.format(project.title(), tier.title())
    condition_payload = {
        "condition": {
            "type": "apm_app_metric",
            "name": condition_name,
            "enabled": True,
            "entities": apm_id,
            "metric": "apdex",
            "condition_scope": "application",
            "terms": [
                {
                    "duration": "5",
                    "operator": "below",
                    "priority": "critical",
                    "threshold": "0.7",
                    "time_function": "all"
                },
                {
                    "duration": "5",
                    "operator": "below",
                    "priority": "warning",
                    "threshold": "0.8",
                    "time_function": "all"
                }
            ]
        }
    }

    # Check whether a condition with this name already sits on the policy.
    # Bug fix: policy_id goes in the query string (``params=``), not the
    # GET request body, which New Relic ignores.
    condition_found = False
    condition_id = None
    try:
        response = requests.get('https://api.newrelic.com/v2/alerts_conditions.json',
                                headers=headers, params={'policy_id': policy_id})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    for x in response.json()['conditions']:
        if condition_name in x.get("name", "none"):
            condition_found = True
            condition_id = x.get("id", "none")

    json_headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    if not condition_found:
        # Bug fix: create via the REST v2 endpoint; the old code posted to
        # the infrastructure API, which does not accept apm_app_metric
        # conditions, and without a Content-Type header.
        url = 'https://api.newrelic.com/v2/alerts_conditions/policies/{}.json'.format(policy_id)
        try:
            response = requests.post(url, headers=json_headers,
                                     data=json.dumps(condition_payload), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(condition_name))
    else:
        print('{} already exists - updating with the latest configuration'.format(condition_name))
        url = 'https://api.newrelic.com/v2/alerts_conditions/{}.json'.format(condition_id)
        try:
            response = requests.put(url, headers=json_headers,
                                    data=json.dumps(condition_payload), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
tier.title()) + condition_found = False + headers = {'Api-Key': key} + data = {'policy_id': policy_id} + + try: + response = requests.get('{}'.format(API_ENDPOINT), headers=headers) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + for x in response.json()['data']: + if condition_name in x.get("name", "none"): + condition_found = True + condition_id = x.get("id", "none") + + host_query = "(displayName LIKE '{}-{}-redis-cluster-%')".format(project, tier) + + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + data = { + "data":{ + "type":"infra_metric", + "name":condition_name, + "enabled":True, + "where_clause":host_query, + "policy_id":policy_id, + "integration_provider":"ElastiCacheRedisNode", + "event_type":"DatastoreSample", + "select_value":"provider.stringBasedCmds.Average", + "comparison":"above", + "critical_threshold":{ + "value":10, + "duration_minutes":5, + "time_function":"any" + }, + "warning_threshold":{ + "value":5, + "duration_minutes":10, + "time_function":"any" + } + } + } + + if not condition_found: + # create policy + API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions' + + try: + response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + print('{} Created'.format(condition_name)) + + else: + print('{} already exists - updating with the latest configuration'.format(condition_name)) + + API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions/{}'.format(condition_id) + + # update condition + try: + response = requests.put('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) \ No newline at end of file diff --git a/monitoring/monitors/alerts/conditions/set_aws_redis_conditions.py 
def setawsredisconditions(key, project, tier, policy_id):
    """Create AWS ElastiCache Redis alert conditions for a project/tier.

    Posts two infrastructure alert conditions scoped to the
    '<project>-<tier>-redis-cluster-*' hosts: a cluster memory-usage
    condition and a node string-command-rate condition.
    """
    API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions'

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }
    host_query = "(displayName LIKE '{}-{}-redis-cluster-%')".format(project, tier)

    def post_condition(name, body):
        # POST a single condition payload and report its creation.
        try:
            requests.post('{}'.format(API_ENDPOINT), headers=headers,
                          data=json.dumps(body), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(name))

    # set redis system alerts
    memory_name = '{}-{} AWS Redis Memory Condition'.format(project.title(), tier.title())
    post_condition(memory_name, {
        "data": {
            "type": "infra_metric",
            "name": memory_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "integration_provider": "ElastiCacheRedisCluster",
            "event_type": "DatastoreSample",
            "select_value": "provider.databaseMemoryUsagePercentage.Average",
            "comparison": "above",
            "critical_threshold": {
                "value": 1,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 0.8,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    })

    # set redis performance alerts
    command_name = '{}-{} AWS Redis Command Condition'.format(project.title(), tier.title())
    post_condition(command_name, {
        "data": {
            "type": "infra_metric",
            "name": command_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "integration_provider": "ElastiCacheRedisNode",
            "event_type": "DatastoreSample",
            "select_value": "provider.stringBasedCmds.Average",
            "comparison": "above",
            "critical_threshold": {
                "value": 10,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 5,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    })
def setcpucondition(key, host, policy_id):
    """Create or update the infrastructure CPU-usage alert for one host.

    Lists the infra alert conditions attached to ``policy_id``; when a
    condition named '<Host> CPU Used Condition' already exists it is
    updated in place (PUT), otherwise a new one is created (POST).
    """
    condition_name = '{} CPU Used Condition'.format(host.title())

    # List the conditions already attached to this policy.
    list_url = 'https://infra-api.newrelic.com/v2/alerts/conditions?policy_id={}'.format(policy_id)
    try:
        listing = requests.get('{}'.format(list_url), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    existing_id = None
    for condition in listing.json()['data']:
        if condition_name in condition.get("name", "none"):
            existing_id = condition.get("id", "none")

    payload = {
        "data": {
            "type": "infra_metric",
            "name": condition_name,
            "enabled": True,
            "where_clause": "(displayName IN ('{}'))".format(host),
            "policy_id": policy_id,
            "event_type": "SystemSample",
            "select_value": "cpuPercent",
            "comparison": "above",
            "critical_threshold": {
                "value": 80,
                "duration_minutes": 5,
                "time_function": "all"
            },
            # NOTE(review): warning shares the 80% value with critical and
            # differs only in duration — confirm this is intentional.
            "warning_threshold": {
                "value": 80,
                "duration_minutes": 2,
                "time_function": "all"
            }
        }
    }
    json_headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    if existing_id is None:
        # create condition
        try:
            requests.post('https://infra-api.newrelic.com/v2/alerts/conditions',
                          headers=json_headers, data=json.dumps(payload),
                          allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(condition_name))
    else:
        print('{} already exists - updating with the latest configuration'.format(condition_name))
        # update condition
        try:
            requests.put('https://infra-api.newrelic.com/v2/alerts/conditions/{}'.format(existing_id),
                         headers=json_headers, data=json.dumps(payload),
                         allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
def setmemorycondition(key, host, policy_id):
    """Create or update the infrastructure free-memory alert for one host.

    Lists the infra alert conditions attached to ``policy_id``; when a
    condition named '<Host> Memory Used Condition' already exists it is
    updated in place (PUT), otherwise a new one is created (POST).
    """
    condition_name = '{} Memory Used Condition'.format(host.title())

    # List the conditions already attached to this policy.
    list_url = 'https://infra-api.newrelic.com/v2/alerts/conditions?policy_id={}'.format(policy_id)
    try:
        listing = requests.get('{}'.format(list_url), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    existing_id = None
    for condition in listing.json()['data']:
        if condition_name in condition.get("name", "none"):
            existing_id = condition.get("id", "none")

    payload = {
        "data": {
            "type": "infra_metric",
            "name": condition_name,
            "enabled": True,
            "where_clause": "(displayName IN ('{}'))".format(host),
            "policy_id": policy_id,
            "event_type": "SystemSample",
            # Percentage of free memory; alert fires when it drops too low.
            "select_value": "memoryFreeBytes/memoryTotalBytes*100",
            "comparison": "below",
            "critical_threshold": {
                "value": 10,
                "duration_minutes": 5,
                "time_function": "all"
            },
            "warning_threshold": {
                "value": 30,
                "duration_minutes": 5,
                "time_function": "all"
            }
        }
    }
    json_headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    if existing_id is None:
        # create condition
        try:
            requests.post('https://infra-api.newrelic.com/v2/alerts/conditions',
                          headers=json_headers, data=json.dumps(payload),
                          allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(condition_name))
    else:
        print('{} already exists - updating with the latest configuration'.format(condition_name))
        # update condition
        try:
            requests.put('https://infra-api.newrelic.com/v2/alerts/conditions/{}'.format(existing_id),
                         headers=json_headers, data=json.dumps(payload),
                         allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
def setnginxconditions(key, project, tier, policy_id):
    """Create Nginx alert conditions for a project/tier.

    Posts two infrastructure alert conditions scoped by the Project and
    Environment host labels: a performance condition on waiting
    connections and an error condition on dropped connections.
    """
    API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions'

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }
    host_query = "(label.Project = '{}' AND label.Environment = '{}')".format(project, tier)

    def post_condition(name, body):
        # POST a single condition payload and report its creation.
        try:
            requests.post('{}'.format(API_ENDPOINT), headers=headers,
                          data=json.dumps(body), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(name))

    # set nginx performance alerts
    perf_name = '{}-{} Nginx Performance Condition'.format(project.title(), tier.title())
    post_condition(perf_name, {
        "data": {
            "type": "infra_metric",
            "name": perf_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "event_type": "NginxSample",
            "select_value": "net.connectionsWaiting",
            "comparison": "above",
            "critical_threshold": {
                "value": 2,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 1,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    })

    # set nginx error alerts
    error_name = '{}-{} Nginx Error Condition'.format(project.title(), tier.title())
    post_condition(error_name, {
        "data": {
            "type": "infra_metric",
            "name": error_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "event_type": "NginxSample",
            "select_value": "net.connectionsDroppedPerSecond",
            "comparison": "above",
            "critical_threshold": {
                "value": 0,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 0,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    })
def setnginxconditions(key, project, tier, policy_id):
    """Create or update the Nginx performance alert for a project/tier.

    Lists the infra alert conditions attached to ``policy_id``; when a
    condition named '<Project>-<Tier> Nginx Performance Condition'
    already exists it is updated in place (PUT), otherwise a new one is
    created (POST).  The condition watches waiting connections on hosts
    labelled with the given Project/Environment.
    """
    condition_name = '{}-{} Nginx Performance Condition'.format(project.title(), tier.title())

    # List the conditions already attached to this policy.
    list_url = 'https://infra-api.newrelic.com/v2/alerts/conditions?policy_id={}'.format(policy_id)
    try:
        listing = requests.get('{}'.format(list_url), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    existing_id = None
    for condition in listing.json()['data']:
        if condition_name in condition.get("name", "none"):
            existing_id = condition.get("id", "none")

    payload = {
        "data": {
            "type": "infra_metric",
            "name": condition_name,
            "enabled": True,
            "where_clause": "(label.Project = '{}' AND label.Environment = '{}')".format(project, tier),
            "policy_id": policy_id,
            "event_type": "NginxSample",
            "select_value": "net.connectionsWaiting",
            "comparison": "above",
            "critical_threshold": {
                "value": 2,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 1,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    }
    json_headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    if existing_id is None:
        # create condition
        try:
            requests.post('https://infra-api.newrelic.com/v2/alerts/conditions',
                          headers=json_headers, data=json.dumps(payload),
                          allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(condition_name))
    else:
        print('{} already exists - updating with the latest configuration'.format(condition_name))
        # update condition
        try:
            requests.put('https://infra-api.newrelic.com/v2/alerts/conditions/{}'.format(existing_id),
                         headers=json_headers, data=json.dumps(payload),
                         allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
def setredisconditions(key, project, tier, policy_id):
    """Create self-hosted Redis alert conditions for a project/tier.

    Posts three infrastructure alert conditions scoped by the Project and
    Environment host labels: system memory, command throughput, and
    rejected connections.
    """
    API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions'

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }
    host_query = "(label.Project = '{}' AND label.Environment = '{}')".format(project, tier)

    def post_condition(name, body):
        # POST a single condition payload and report its creation.
        try:
            requests.post('{}'.format(API_ENDPOINT), headers=headers,
                          data=json.dumps(body), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(name))

    # set redis system alerts
    memory_name = '{}-{} Redis Memory Condition'.format(project.title(), tier.title())
    post_condition(memory_name, {
        "data": {
            "type": "infra_metric",
            "name": memory_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "event_type": "RedisSample",
            # NOTE(review): this watches total system memory in bytes, not
            # memory in use — confirm the intended metric.
            "select_value": "system.totalSystemMemoryBytes",
            "comparison": "below",
            "critical_threshold": {
                "value": 2000000000,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 4000000000,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    })

    # set redis performance alerts
    command_name = '{}-{} Redis Command Condition'.format(project.title(), tier.title())
    post_condition(command_name, {
        "data": {
            "type": "infra_metric",
            "name": command_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "event_type": "RedisSample",
            "select_value": "net.commandsProcessedPerSecond",
            "comparison": "below",
            "critical_threshold": {
                "value": 0.1,
                "duration_minutes": 5,
                "time_function": "any"
            },
            "warning_threshold": {
                "value": 0.2,
                "duration_minutes": 10,
                "time_function": "any"
            }
        }
    })

    # set redis error alerts
    error_name = '{}-{} Redis Error Condition'.format(project.title(), tier.title())
    post_condition(error_name, {
        "data": {
            "type": "infra_metric",
            "name": error_name,
            "enabled": True,
            "where_clause": host_query,
            "policy_id": policy_id,
            "event_type": "RedisSample",
            "select_value": "net.rejectedConnectionsPerSecond",
            "comparison": "above",
            # No warning threshold: any rejected connection is critical.
            "critical_threshold": {
                "value": 0,
                "duration_minutes": 5,
                "time_function": "any"
            }
        }
    })
+ API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions' + + try: + response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + print('{} Created'.format(condition_name)) + + else: + print('{} already exists - updating with the latest configuration'.format(condition_name)) + + API_ENDPOINT = 'https://infra-api.newrelic.com/v2/alerts/conditions/{}'.format(condition_id) + + # update condition + try: + response = requests.put('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) \ No newline at end of file diff --git a/monitoring/monitors/alerts/conditions/set_synthetics_condition.py b/monitoring/monitors/alerts/conditions/set_synthetics_condition.py new file mode 100644 index 000000000..77c5009af --- /dev/null +++ b/monitoring/monitors/alerts/conditions/set_synthetics_condition.py @@ -0,0 +1,56 @@ +#!/usr/bin/python + +import json +import requests + +def setsyntheticscondition(project, tier, key, synthetics_id, policy_id): + API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_synthetics_conditions.json' + + condition_name = '{}-{} Url Condition'.format(project.title(), tier.title()) + condition_found = False + headers = {'Api-Key': key} + data = {'policy_id': policy_id} + + try: + response = requests.get('{}'.format(API_ENDPOINT), headers=headers, data=data) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + for x in response.json()['synthetics_conditions']: + if condition_name in x.get("name", "none"): + condition_found = True + condition_id = x.get("id", "none") + + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + data = { + "synthetics_condition": { + "name": condition_name, + "monitor_id": synthetics_id, + "enabled": True + } + } + + if not condition_found: + # 
def getalertpolicies(project, tier, key):
    """List every New Relic alert policy whose name contains both the project
    and the tier, printing each policy name and then delegating to the
    condition-listing helpers for that policy.

    project -- project substring to match (case-insensitive)
    tier    -- tier substring to match (case-insensitive)
    key     -- New Relic API key

    Exits the process (SystemExit) on any HTTP transport error.
    """
    API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies.json'

    try:
        response = requests.get('{}'.format(API_ENDPOINT), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    project_needle = project.lower()
    tier_needle = tier.lower()

    print('Alert Policies:')
    for policy in response.json()['policies']:
        policy_name = policy.get("name", "none")
        if project_needle in policy_name.lower() and tier_needle in policy_name.lower():
            print(' ' + policy_name)
            policy_id = policy.get("id", "none")
            # Dump every kind of condition attached to this policy.
            get_alert_conditions.getalertconditions(policy_id, key)
            get_nrql_conditions.getnrqlconditions(policy_id, key)
            get_external_services.getexternalservices(policy_id, key)
            get_synthetics_conditions.getsyntheticsconditions(policy_id, key)
    print()
def setawsalertpolicy(project, tier, email_id, key):
    """Create the '<Project>-<Tier> AWS Policy' alert policy if it does not
    exist (attaching the email notification channel), otherwise update it,
    then (re)apply the AWS redis memory and command conditions to it.

    project  -- project name used in the policy name
    tier     -- environment tier used in the policy name
    email_id -- notification channel id attached on creation
    key      -- New Relic API key

    Exits the process (SystemExit) on any HTTP transport error.
    """
    API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies.json'

    policy_name = '{}-{} AWS Policy'.format(project.title(), tier.title())

    # Look up any existing policy with this name.
    try:
        response = requests.get('{}'.format(API_ENDPOINT), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    policy_id = None
    for policy in response.json()['policies']:
        if policy_name in policy.get("name", "none"):
            policy_id = policy.get("id", "none")

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    data = {
        "policy": {
            "incident_preference": "PER_POLICY",
            "name": policy_name
        }
    }

    if policy_id is None:
        # create policy
        try:
            response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        policy_id = response.json()['policy'].get("id", "none")

        # add notification channels
        data = {
            "policy_id": '{}'.format(policy_id),
            "channel_ids": '{}'.format(email_id)
        }

        try:
            response = requests.put('https://api.newrelic.com/v2/alerts_policy_channels.json', headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(policy_name))

    else:
        print('{} already exists - updating with the latest configuration'.format(policy_name))

        API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies/{}.json'.format(policy_id)

        # update policy
        try:
            response = requests.put('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)

    # add redis conditions
    set_aws_redis_mem_condition.setawsredisconditions(key, project, tier, policy_id)
    set_aws_redis_command_condition.setawsredisconditions(key, project, tier, policy_id)
"application/json" + } + + data = { + "policy": { + "incident_preference": "PER_POLICY", + "name": policy_name + } + } + + if not policy_found: + + # create policy + try: + response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + policy_id = response.json()['policy'].get("id", "none") + + # add notification channels + data = { + "policy_id": '{}'.format(policy_id), + "channel_ids": '{}'.format(email_id) + } + + try: + response = requests.put('https://api.newrelic.com/v2/alerts_policy_channels.json', headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + print('{} Created'.format(policy_name)) + + else: + print('{} already exists - updating with the latest configuration'.format(policy_name)) + + API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies/{}.json'.format(policy_id) + + # update policy + try: + response = requests.put('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + # add disk space condition + set_disk_space_condition.setdiskspacecondition(key, '{}-aws-{}-neo4j'.format(project.lower(), tier.lower()), policy_id) + + # add memory condition + set_memory_condition.setmemorycondition(key, '{}-aws-{}-neo4j'.format(project.lower(), tier.lower()), policy_id) + + # add cpu condition + set_cpu_condition.setcpucondition(key, '{}-aws-{}-neo4j'.format(project.lower(), tier.lower()), policy_id) \ No newline at end of file diff --git a/monitoring/monitors/alerts/policies/set_nginx_policy.py b/monitoring/monitors/alerts/policies/set_nginx_policy.py new file mode 100644 index 000000000..9791eea63 --- /dev/null +++ b/monitoring/monitors/alerts/policies/set_nginx_policy.py @@ -0,0 +1,71 @@ +#!/usr/bin/python + +import os +import json 
def setnginxalertpolicy(project, tier, email_id, key):
    """Create the '<Project>-<Tier> Nginx Policy' alert policy if it does not
    exist (attaching the email notification channel), otherwise update it,
    then (re)apply the nginx performance and error conditions to it.

    project  -- project name used in the policy name
    tier     -- environment tier used in the policy name
    email_id -- notification channel id attached on creation
    key      -- New Relic API key

    Exits the process (SystemExit) on any HTTP transport error.
    """
    API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies.json'

    policy_name = '{}-{} Nginx Policy'.format(project.title(), tier.title())

    # Look up any existing policy with this name.
    try:
        response = requests.get('{}'.format(API_ENDPOINT), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    policy_id = None
    for policy in response.json()['policies']:
        if policy_name in policy.get("name", "none"):
            policy_id = policy.get("id", "none")

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    data = {
        "policy": {
            "incident_preference": "PER_POLICY",
            "name": policy_name
        }
    }

    if policy_id is None:
        # create policy
        try:
            response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        policy_id = response.json()['policy'].get("id", "none")

        # add notification channels
        data = {
            "policy_id": '{}'.format(policy_id),
            "channel_ids": '{}'.format(email_id)
        }

        try:
            response = requests.put('https://api.newrelic.com/v2/alerts_policy_channels.json', headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(policy_name))

    else:
        print('{} already exists - updating with the latest configuration'.format(policy_name))

        API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies/{}.json'.format(policy_id)

        # update policy
        try:
            response = requests.put('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)

    # add nginx conditions
    set_nginx_perf_condition.setnginxconditions(key, project, tier, policy_id)
    set_nginx_error_condition.setnginxconditions(key, project, tier, policy_id)
def seturlalertpolicy(project, tier, email_id, slack_id, synthetics_id, key):
    """Create the '<Project>-<Tier> Url Policy' alert policy if it does not
    exist (attaching email and slack notification channels), otherwise update
    it, then (re)apply the synthetics condition for the given monitor.

    project       -- project name used in the policy name
    tier          -- environment tier used in the policy name
    email_id      -- email notification channel id attached on creation
    slack_id      -- slack notification channel id attached on creation
    synthetics_id -- synthetics monitor id the condition watches
    key           -- New Relic API key

    Exits the process (SystemExit) on any HTTP transport error.
    """
    API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies.json'

    policy_name = '{}-{} Url Policy'.format(project.title(), tier.title())

    # Look up any existing policy with this name.
    try:
        response = requests.get('{}'.format(API_ENDPOINT), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    policy_id = None
    for policy in response.json()['policies']:
        if policy_name in policy.get("name", "none"):
            policy_id = policy.get("id", "none")

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    data = {
        "policy": {
            "incident_preference": "PER_POLICY",
            "name": policy_name
        }
    }

    if policy_id is None:
        # create policy
        try:
            response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        policy_id = response.json()['policy'].get("id", "none")

        # add notification channels
        data = {
            "policy_id": '{}'.format(policy_id),
            "channel_ids": '{},{}'.format(email_id, slack_id)
        }

        try:
            response = requests.put('https://api.newrelic.com/v2/alerts_policy_channels.json', headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        print('{} Created'.format(policy_name))

    else:
        print('{} already exists - updating with the latest configuration'.format(policy_name))

        API_ENDPOINT = 'https://api.newrelic.com/v2/alerts_policies/{}.json'.format(policy_id)

        # update policy
        try:
            response = requests.put('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)

    # add synthetics condition
    set_synthetics_condition.setsyntheticscondition(project, tier, key, synthetics_id, policy_id)
def geturlmonitor(project, tier, key):
    """Print the names of all New Relic Synthetics monitors whose name
    contains both the project and the tier (case-insensitive).

    project -- project substring to match
    tier    -- tier substring to match
    key     -- New Relic API key

    Exits the process (SystemExit) on any HTTP transport error.
    """
    API_ENDPOINT = 'https://synthetics.newrelic.com/synthetics/api'

    try:
        response = requests.get('{}/v3/monitors'.format(API_ENDPOINT), headers={'Api-Key': key})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    project_needle = project.lower()
    tier_needle = tier.lower()

    print('Synthetics Monitors:')
    for monitor in response.json()['monitors']:
        monitor_name = monitor.get("name", "none")
        if project_needle in monitor_name.lower() and tier_needle in monitor_name.lower():
            print(' ' + monitor_name)
    print()
def seturlmonitor(project, tier, key):
    """Create or update the '<Project>-<Tier> Url Monitor' Synthetics browser
    monitor, tag it with Project/Environment, and return its monitor id.

    The monitored URL comes from the URL_DOMAIN environment variable; prod
    tiers poll the bare domain every 10 minutes, other tiers poll
    https://<project>-<tier>.<domain> every 30 minutes.

    project -- project name used in the monitor name/uri
    tier    -- environment tier used in the monitor name/uri
    key     -- New Relic API key

    Exits the process (SystemExit) on any HTTP transport error, or if the
    monitor cannot be found after the create/update round trip.
    """
    API_ENDPOINT = 'https://synthetics.newrelic.com/synthetics/api/v3/monitors'
    DOMAIN = os.getenv('URL_DOMAIN')

    if tier.lower() == 'prod':
        freq = 10
        monitor_uri = 'https://{}'.format(DOMAIN)
    else:
        freq = 30
        monitor_uri = 'https://{}-{}.{}'.format(project, tier, DOMAIN)

    # set monitor configuration
    monitor_name = '{}-{} Url Monitor'.format(project.title(), tier.title())
    data = {
        "name": monitor_name,
        "type": "BROWSER",
        "frequency": freq,
        "uri": monitor_uri,
        "locations": [ "AWS_US_EAST_1" ],
        "status": "ENABLED",
        "slaThreshold": 7.0,
    }

    headers = {'Api-Key': key}

    try:
        response = requests.get('{}'.format(API_ENDPOINT), headers=headers)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    # BUG FIX: the original for/else had no `break`, so the create branch ran
    # on every call and duplicated the monitor. Record a match explicitly.
    existing = None
    for x in response.json()['monitors']:
        if monitor_name in x.get("name", "none"):
            existing = x
            break

    headers = {
        "Api-Key": key,
        "Content-Type": "application/json"
    }

    if existing is not None:
        print('{} already exists - updating with current configuration'.format(monitor_name))
        try:
            requests.put('{}/{}'.format(API_ENDPOINT, existing.get("id", "none")), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
    else:
        try:
            response = requests.post('{}'.format(API_ENDPOINT), headers=headers, data=json.dumps(data), allow_redirects=False)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)

    # Re-read the monitor list so a freshly created monitor's id is visible.
    try:
        response = requests.get('{}'.format(API_ENDPOINT), headers=headers)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    url_monitor = None
    for x in response.json()['monitors']:
        if monitor_name in x.get("name", "none"):
            url_monitor = x
            break

    # BUG FIX: url_monitor could previously be unbound here (NameError).
    if url_monitor is None:
        raise SystemExit('{} not found after create/update'.format(monitor_name))

    # set tags on the monitor
    set_tags_nrql.settagsnrql(project, tier, url_monitor.get('name'), key)

    return(url_monitor.get('id'))
#set release for apm apps + API_ENDPOINT = 'https://api.newrelic.com/v2/applications/{}/deployments.json'.format(apm_id) + + revision_name = '{}'.format(version) + revision_description = '{} {} updated to v{}'.format(project, tier, version) + #revision_time = datetime.utcnow() + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + data = { + "deployment": { + "revision": revision_name, + "description": revision_description, + #"timestamp": revision_time.isoformat(), + } + } + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + print('{} Created'.format(revision_name)) \ No newline at end of file diff --git a/monitoring/requirements.txt b/monitoring/requirements.txt new file mode 100644 index 000000000..663bd1f6a --- /dev/null +++ b/monitoring/requirements.txt @@ -0,0 +1 @@ +requests \ No newline at end of file diff --git a/monitoring/tags/set_tags_nrql.py b/monitoring/tags/set_tags_nrql.py new file mode 100644 index 000000000..6f76863bc --- /dev/null +++ b/monitoring/tags/set_tags_nrql.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +import json +import requests +import re + +def settagsnrql(project, tier, entity, key): + API_ENDPOINT = 'https://api.newrelic.com/graphql' + headers = { + "Api-Key": key, + "Content-Type": "application/json" + } + + # set tags + data = {"query":"{\n actor {\n entitySearch(query: \"name = \'" + entity + "\'\") {\n query\n results {\n entities {\n guid\n }\n }\n }\n }\n}\n", "variables":""} + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + guid = re.findall(r'^.*?\bguid\b\":\"([^$]*?)\"',response.text)[0] + + tagdefs = { + 'key: "Environment", values: "{}"'.format(tier), + 'key: "Project", values: "{}"'.format(project) + } + + for tag in tagdefs: + 
data = {"query":"mutation {\n taggingAddTagsToEntity(guid: \"" + guid + "\", tags: { " + tag + " }) {\n errors {\n message\n }\n }\n}\n", "variables":""} + + try: + response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False) + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + print('Added tags to {}'.format(entity)) \ No newline at end of file diff --git a/scripts/python/modify-alb/main.py b/scripts/python/modify-alb/main.py new file mode 100644 index 000000000..c085066c8 --- /dev/null +++ b/scripts/python/modify-alb/main.py @@ -0,0 +1,16 @@ +# This is a sample Python script. + +# Press ⌃R to execute it or replace it with your code. +# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings. +import boto3 +from botocore.config import Config + +config = Config( + region_name="us-east-1" +) +session = boto3.Session(profile_name="icdc") +client = session.client('elbv2', config=config) + +response = client.describe_load_balancers() +albs = [alb["LoadBalancerName"] for alb in response["LoadBalancers"]] +print(albs) diff --git a/terraform/deployed/Rserver/alb.tf b/terraform/deployed/Rserver/alb.tf new file mode 100644 index 000000000..b024ab09e --- /dev/null +++ b/terraform/deployed/Rserver/alb.tf @@ -0,0 +1,11 @@ + +module "alb" { + source = "../../modules/networks/alb" + stack_name = var.stack_name + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + certificate_arn = data.aws_acm_certificate.certificate.arn + subnets =data.terraform_remote_state.network.outputs.public_subnets_ids + tags = var.tags + env = var.env + region = var.region +} diff --git a/terraform/deployed/Rserver/asg.tf b/terraform/deployed/Rserver/asg.tf new file mode 100644 index 000000000..24c355683 --- /dev/null +++ b/terraform/deployed/Rserver/asg.tf @@ -0,0 +1,367 @@ +resource "aws_launch_configuration" "asg_launch_config" { + name = "${var.stack_name}-${var.env}-launch-configuration" 
+ image_id = data.aws_ami.centos.id + instance_type = var.fronted_instance_type + iam_instance_profile = aws_iam_instance_profile.ecs-instance-profile.id + security_groups = [aws_security_group.frontend_sg.id] + associate_public_ip_address = var.associate_public_ip_address + key_name = var.ssh_key_name + user_data = data.template_cloudinit_config.user_data.rendered + root_block_device { + volume_type = var.evs_volume_type + volume_size = var.instance_volume_size + delete_on_termination = true + } + + lifecycle { + create_before_destroy = true + } + +} + +resource "aws_autoscaling_group" "asg_frontend" { + name = join("-",[var.stack_name,var.env,var.frontend_asg_name]) + max_size = var.max_size + min_size = var.min_size + desired_capacity = var.desired_ec2_instance_capacity + vpc_zone_identifier = data.terraform_remote_state.network.outputs.private_subnets_ids + launch_configuration = aws_launch_configuration.asg_launch_config.name + target_group_arns = [aws_lb_target_group.frontend_target_group.arn,aws_lb_target_group.db_target_group.arn] + health_check_type = var.health_check_type + tag { + key = "Name" + propagate_at_launch = true + value = "${var.stack_name}-${var.env}-${var.frontend_asg_name}" + } + dynamic "tag" { + for_each = var.tags + content { + key = tag.key + value = tag.value + propagate_at_launch = true + } + } +} + +resource "aws_autoscaling_schedule" "shutdown" { + autoscaling_group_name = aws_autoscaling_group.asg_frontend.name + scheduled_action_name = "bento-auto-stop" + recurrence = var.shutdown_schedule + desired_capacity = 0 +} + +resource "aws_autoscaling_schedule" "startup" { + autoscaling_group_name = aws_autoscaling_group.asg_frontend.name + scheduled_action_name = "bento-auto-start" + recurrence = var.startup_schedule + desired_capacity = var.desired_ec2_instance_capacity + min_size = var.min_size + max_size = var.max_size +} + +resource "aws_security_group" "frontend_sg" { + name = "${var.stack_name}-${var.env}-frontend-sg" + vpc_id = 
data.terraform_remote_state.network.outputs.vpc_id + tags = merge( + { + "Name" = format("%s-%s-frontend-sg",var.stack_name,var.env), + }, + var.tags, + ) +} + +resource "aws_security_group_rule" "inbound_bastion_frontend" { + from_port = local.bastion_port + protocol = local.tcp_protocol + to_port = local.bastion_port + security_group_id = aws_security_group.frontend_sg.id + source_security_group_id = data.terraform_remote_state.bastion.outputs.bastion_security_group_id + type = "ingress" +} + +resource "aws_security_group_rule" "inbound_frontend_alb" { + from_port = var.frontend_container_port + protocol = local.tcp_protocol + to_port = var.frontend_container_port + security_group_id = aws_security_group.frontend_sg.id + source_security_group_id = module.alb.alb_security_group_id + type = "ingress" +} + +resource "aws_security_group_rule" "inbound_db_alb" { + from_port = var.db_container_port + protocol = local.tcp_protocol + to_port = var.db_container_port + security_group_id = aws_security_group.frontend_sg.id + source_security_group_id = module.alb.alb_security_group_id + type = "ingress" +} + +resource "aws_security_group_rule" "all_outbound_frontend" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.frontend_sg.id + type = "egress" +} + +#create alb target group +resource "aws_lb_target_group" "frontend_target_group" { + name = "${var.stack_name}-${var.env}-frontend" + port = var.frontend_container_port + protocol = "HTTP" + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = "/" + protocol = "HTTP" + matcher = "200" + interval = 15 + timeout = 3 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"frontend-alb-target-group") + }, + var.tags, + ) +} + +#create alb target 
group +resource "aws_lb_target_group" "db_target_group" { + name = "${var.stack_name}-${var.env}-db" + port = var.db_container_port + protocol = "HTTP" + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = "/" + protocol = "HTTP" + matcher = "200" + interval = 15 + timeout = 3 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"db-alb-target") + }, + var.tags, + ) +} + +resource "aws_lb_listener_rule" "frontend_alb_listener_prod" { + count = var.env == "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.fronted_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["${var.stack_name}.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + + +resource "aws_lb_listener_rule" "bento_prod" { + count = var.env == "prod" && var.stack_name == "bento" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = [var.domain_name] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + + +resource "aws_lb_listener_rule" "bento_www" { + count = var.env == "prod" && var.stack_name == "bento" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["www.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + + +variable "db_rule_priority" { + default = "" +} +resource "aws_lb_listener_rule" "db_alb_listener_prod" { + count = var.env == "prod" ? 
1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.db_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.db_target_group.arn + } + + condition { + host_header { + values = ["${var.stack_name}.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/service*"] + } + } +} + +resource "aws_lb_listener_rule" "frontend_alb_listener" { + count = var.env != "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.fronted_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["${lower(var.stack_name)}-${var.env}.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } + +} + +resource "aws_lb_listener_rule" "backend_alb_listener" { + count = var.env != "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.db_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.db_target_group.arn + } + + condition { + host_header { + values = ["${lower(var.stack_name)}-${var.env}.${var.domain_name}"] + } + + } + condition { + path_pattern { + values = ["/service*"] + } + } +} + + +#create boostrap script to hook up the node to ecs cluster +resource "aws_ssm_document" "ssm_doc_boostrap" { + name = "${var.stack_name}-${var.env}-bootstrap-ecs-node" + document_type = "Command" + document_format = "YAML" + content = < 30) + transition { + storage_class = "STANDARD_IA" + days = var.s3_object_standard_ia_transition_days + } + noncurrent_version_transition { + days = var.s3_object_nonactive_expiration_days - 30 > 30 ? 
30 : var.s3_object_nonactive_expiration_days + 30 + storage_class = "STANDARD_IA" + } + } + lifecycle_rule { + id = "expire_objects" + enabled = true + expiration { + days = var.s3_object_expiration_days + } + noncurrent_version_expiration { + days = var.s3_object_nonactive_expiration_days + } + } + tags = var.tags +} diff --git a/terraform/deployed/bento/albs3logging/outputs.tf b/terraform/deployed/bento/albs3logging/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/terraform/deployed/bento/albs3logging/provider.tf b/terraform/deployed/bento/albs3logging/provider.tf new file mode 100644 index 000000000..f1cf3216c --- /dev/null +++ b/terraform/deployed/bento/albs3logging/provider.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "3.67.0" + } + } +} +provider "aws" { + profile = var.profile + region = var.region +} \ No newline at end of file diff --git a/terraform/deployed/bento/albs3logging/variables.tf b/terraform/deployed/bento/albs3logging/variables.tf new file mode 100644 index 000000000..ab1af0f41 --- /dev/null +++ b/terraform/deployed/bento/albs3logging/variables.tf @@ -0,0 +1,53 @@ +variable "tags" { + description = "tags to associate with this instance" + type = map(string) + default = { + ManagedBy = "terraform" + Project = "bento" + Environment = "prod" + POC = "Amit" + } +} +variable "stack_name" { + description = "name of the project" + type = string + default = "bento" +} +variable "region" { + description = "aws region to deploy" + type = string + default = "us-east-1" +} +variable "profile" { + description = "iam user profile to use" + type = string + default = "icdc" +} +variable "s3_object_expiration_days" { + description = "number of days for object to live" + type = number + default = 720 +} +variable "s3_object_nonactive_expiration_days" { + description = "number of days to retain non active objects" + type = number + default = 90 +} +variable 
"s3_object_standard_ia_transition_days" { + description = "number of days for an object to transition to standard_ia storage class" + default = 120 + type = number +} +variable "s3_object_glacier_transition_days" { + description = "number of days for an object to transition to glacier storage class" + default = 180 + type = number +} + +variable "aws_account_id" { + type = map(string) + description = "aws account to allow for alb s3 logging" + default = { + us-east-1 = "127311923021" + } +} \ No newline at end of file diff --git a/terraform/deployed/bento/app/alb.tf b/terraform/deployed/bento/app/alb.tf new file mode 100644 index 000000000..333dbb2f0 --- /dev/null +++ b/terraform/deployed/bento/app/alb.tf @@ -0,0 +1,11 @@ + +module "alb" { + source = "../../../modules/networks/alb" + stack_name = var.stack_name + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + certificate_arn = data.aws_acm_certificate.certificate.arn + subnets =data.terraform_remote_state.network.outputs.public_subnets_ids + alb_s3_bucket_name = "${var.stack_name}-alb-${terraform.workspace}-access-logs" + tags = var.tags + env = var.env +} diff --git a/terraform/deployed/bento/app/asg.tf b/terraform/deployed/bento/app/asg.tf new file mode 100644 index 000000000..0cd1d236c --- /dev/null +++ b/terraform/deployed/bento/app/asg.tf @@ -0,0 +1,481 @@ + +resource "aws_launch_configuration" "asg_launch_config" { + name = "${var.stack_name}-${var.env}-launch-configuration" + image_id = data.aws_ami.centos.id + instance_type = var.fronted_instance_type + iam_instance_profile = aws_iam_instance_profile.ecs-instance-profile.id + security_groups = [aws_security_group.frontend_sg.id] + associate_public_ip_address = var.associate_public_ip_address + key_name = var.ssh_key_name + user_data = data.template_cloudinit_config.user_data.rendered + root_block_device { + volume_type = var.evs_volume_type + volume_size = var.instance_volume_size + delete_on_termination = true + } + + lifecycle { + 
create_before_destroy = true + } + +} + +resource "aws_autoscaling_group" "asg_frontend" { + name = join("-",[var.stack_name,var.env,var.frontend_asg_name]) + max_size = var.max_size + min_size = var.min_size + desired_capacity = var.desired_ec2_instance_capacity + vpc_zone_identifier = data.terraform_remote_state.network.outputs.private_subnets_ids + launch_configuration = aws_launch_configuration.asg_launch_config.name + target_group_arns = [aws_lb_target_group.frontend_target_group.arn,aws_lb_target_group.backend_target_group.arn] + health_check_type = var.health_check_type + tag { + key = "Name" + propagate_at_launch = true + value = "${var.stack_name}-${var.env}-${var.frontend_asg_name}" + } + dynamic "tag" { + for_each = var.tags + content { + key = tag.key + value = tag.value + propagate_at_launch = true + } + } +} + +resource "aws_autoscaling_schedule" "shutdown" { + autoscaling_group_name = aws_autoscaling_group.asg_frontend.name + scheduled_action_name = "bento-auto-stop" + recurrence = var.shutdown_schedule + desired_capacity = 0 +} + +resource "aws_autoscaling_schedule" "startup" { + autoscaling_group_name = aws_autoscaling_group.asg_frontend.name + scheduled_action_name = "bento-auto-start" + recurrence = var.startup_schedule + desired_capacity = var.desired_ec2_instance_capacity + min_size = var.min_size + max_size = var.max_size +} + +resource "aws_security_group" "frontend_sg" { + name = "${var.stack_name}-${var.env}-frontend-sg" + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + tags = merge( + { + "Name" = format("%s-%s-frontend-sg",var.stack_name,var.env), + }, + var.tags, + ) +} + +resource "aws_security_group_rule" "inbound_bastion_frontend" { + from_port = local.bastion_port + protocol = local.tcp_protocol + to_port = local.bastion_port + security_group_id = aws_security_group.frontend_sg.id + source_security_group_id = data.terraform_remote_state.bastion.outputs.bastion_security_group_id + type = "ingress" +} + +resource 
"aws_security_group_rule" "inbound_frontend_alb" { + from_port = var.frontend_container_port + protocol = local.tcp_protocol + to_port = var.frontend_container_port + security_group_id = aws_security_group.frontend_sg.id + source_security_group_id = module.alb.alb_security_group_id + type = "ingress" +} + +resource "aws_security_group_rule" "inbound_backend_alb" { + from_port = var.backend_container_port + protocol = local.tcp_protocol + to_port = var.backend_container_port + security_group_id = aws_security_group.frontend_sg.id + source_security_group_id = module.alb.alb_security_group_id + type = "ingress" +} + +resource "aws_security_group_rule" "all_outbound_frontend" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.frontend_sg.id + type = "egress" +} + +#create alb target group +resource "aws_lb_target_group" "frontend_target_group" { + name = "${var.stack_name}-${var.env}-frontend" + port = var.frontend_container_port + protocol = "HTTP" + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = "/" + protocol = "HTTP" + matcher = "200" + interval = 15 + timeout = 3 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"frontend-alb-target-group") + }, + var.tags, + ) +} + +#create alb target group +resource "aws_lb_target_group" "backend_target_group" { + name = "${var.stack_name}-${var.env}-backend" + port = var.backend_container_port + protocol = "HTTP" + vpc_id = data.terraform_remote_state.network.outputs.vpc_id + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = "/ping" + protocol = "HTTP" + matcher = "200" + interval = 15 + timeout = 3 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" 
= format("%s-%s",var.stack_name,"backend-alb-target") + }, + var.tags, + ) +} + + +resource "aws_lb_listener_rule" "frontend_alb_listener_prod" { + count = var.env == "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.fronted_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["${var.stack_name}.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + + +resource "aws_lb_listener_rule" "bento_prod" { + count = var.env == "prod" && var.stack_name == "bento" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = [var.domain_name] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + + +resource "aws_lb_listener_rule" "bento_www" { + count = var.env == "prod" && var.stack_name == "bento" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["www.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + + +resource "aws_lb_listener_rule" "backend_alb_listener_prod" { + count = var.env == "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.backend_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.backend_target_group.arn + } + + condition { + host_header { + values = ["${var.stack_name}.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/v1/graphql/*"] + } + } +} + + +//resource "aws_lb_listener_rule" "backend_alb_listener_prod_others" { +// count = var.env == "prod" ? 
1:0 +// listener_arn = module.alb.alb_https_listener_arn +// priority = var.backend_rule_priority +// action { +// type = "forward" +// target_group_arn = aws_lb_target_group.backend_target_group.arn +// } +// +// condition { +// host_header { +// values = ["${lower(var.stack_name)}.${var.domain_name}"] +// } +// } +// condition { +// path_pattern { +// values = ["/v1/graphql/*"] +// } +// } +//} + + +resource "aws_lb_listener_rule" "frontend_alb_listener" { + count = var.env != "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.fronted_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["${lower(var.stack_name)}-${var.env}.${var.domain_name}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } + +} + +//resource "aws_lb_listener_rule" "frontend_alb_listener_others" { +// count = var.stack_name != "bento" && var.env != "prod" ? 1:0 +// listener_arn = module.alb.alb_https_listener_arn +// priority = var.fronted_rule_priority +// action { +// type = "forward" +// target_group_arn = aws_lb_target_group.frontend_target_group.arn +// } +// +// condition { +// host_header { +// values = ["${lower(var.stack_name)}-${var.env}.${var.domain_name}"] +// } +// } +// condition { +// path_pattern { +// values = ["/*"] +// } +// } +// +//} + + + +resource "aws_lb_listener_rule" "backend_alb_listener" { + count = var.env != "prod" ? 1:0 + listener_arn = module.alb.alb_https_listener_arn + priority = var.backend_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.backend_target_group.arn + } + + condition { + host_header { + values = ["${lower(var.stack_name)}-${var.env}.${var.domain_name}"] + } + + } + condition { + path_pattern { + values = ["/v1/graphql/*"] + } + } +} + +//resource "aws_lb_listener_rule" "backend_alb_listener_others" { +// count = var.stack_name != "bento" && var.env != "prod" ? 
1:0 +// listener_arn = module.alb.alb_https_listener_arn +// priority = var.backend_rule_priority +// action { +// type = "forward" +// target_group_arn = aws_lb_target_group.backend_target_group.arn +// } +// +// condition { +// host_header { +// values = ["${lower(var.stack_name)}-${var.env}.${var.domain_name}"] +// } +// +// } +// condition { +// path_pattern { +// values = ["/v1/graphql/*"] +// } +// } +//} + +//resource "aws_lb_listener_rule" "www" { +// count = var.env == "prod" ? 1:0 +// listener_arn = module.alb.alb_https_listener_arn +// priority = "120" +// action { +// type = "forward" +// target_group_arn = aws_lb_target_group.frontend_target_group.arn +// } +// +// condition { +// host_header { +// values = [join(".",["www",var.domain_name])] +// } +// } +// condition { +// path_pattern { +// values = ["/*"] +// } +// } +//} + +#create boostrap script to hook up the node to ecs cluster +resource "aws_ssm_document" "ssm_doc_boostrap" { + name = "${var.stack_name}-${var.env}-bootstrap-ecs-node" + document_type = "Command" + document_format = "YAML" + content = <= 2: + black_listed_ips.append(ip) + all_blocked_ips = remove_item(all_blocked_ips, ip) + if len(all_blocked_ips) >= 1: + with open(file_name, 'w') as f: + for line in all_blocked_ips: + f.write(line + '\n') + return black_listed_ips + +#upload file to s3 +def upload_blocked_ips(bucket_name, key, file_name): + s3.meta.client.upload_file(file_name, bucket_name, key) + if os.path.exists(file_name): + os.remove(file_name) + +#main lambda entry +def handler(event, context): + waf_blocked_ips = get_all_blocked_ips(s3_bucket_name, block_ip_file_name, key_prefix, local_blocked_ip_file_name) + upload_blocked_ips(s3_bucket_name, block_ip_file_name, local_blocked_ip_file_name) + update_blocked_ip_list(name_of_blocked_ip_list, waf_blocked_ips) + if waf_blocked_ips: + send_blocked_ips_report(waf_blocked_ips) + diff --git a/terraform/deployed/bento/cloudfront/cloudwatch.tf 
b/terraform/deployed/bento/cloudfront/cloudwatch.tf new file mode 100644 index 000000000..bd6e784fb --- /dev/null +++ b/terraform/deployed/bento/cloudfront/cloudwatch.tf @@ -0,0 +1,62 @@ +resource "aws_cloudwatch_metric_alarm" "cloudfront_alarm" { + for_each = var.alarms + alarm_name = "${var.stack_name}-${terraform.workspace}-${each.key}-cloudfront-alarm" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = "5" + metric_name = each.value["name"] + namespace = "AWS/CloudFront" + period = "60" + statistic = "Sum" + threshold = each.value["threshold"] + alarm_description = "CloudFront alarm for ${each.value["name"]}" + insufficient_data_actions = [] + dimensions = { + DistributionId = aws_cloudfront_distribution.bento_distribution.id + Region = "Global" + } + alarm_actions = [aws_sns_topic.cloudfront_alarm_topic.arn] + ok_actions = [aws_sns_topic.cloudfront_alarm_topic.arn] +} + + +resource "aws_sns_topic" "cloudfront_alarm_topic" { + name = "${var.stack_name}-${terraform.workspace}-cloudfront-4xx-5xx-errors" + delivery_policy = < 30) + transition { + storage_class = "STANDARD_IA" + days = var.s3_object_standard_ia_transition_days + } + noncurrent_version_transition { + days = var.s3_object_nonactive_expiration_days - 30 > 30 ? 
30 : var.s3_object_nonactive_expiration_days + 30 + storage_class = "STANDARD_IA" + } + } + lifecycle_rule { + id = "expire_objects" + enabled = true + expiration { + days = var.s3_object_expiration_days + } + noncurrent_version_expiration { + days = var.s3_object_nonactive_expiration_days + } + } +} diff --git a/terraform/deployed/bento/ecs/backend.tf b/terraform/deployed/bento/ecs/backend.tf new file mode 100644 index 000000000..6e09dbbf1 --- /dev/null +++ b/terraform/deployed/bento/ecs/backend.tf @@ -0,0 +1,5 @@ +#set the backend for state file +terraform { + backend "s3" { + } +} diff --git a/terraform/deployed/bento/ecs/data.tf b/terraform/deployed/bento/ecs/data.tf new file mode 100644 index 000000000..3f98a01a6 --- /dev/null +++ b/terraform/deployed/bento/ecs/data.tf @@ -0,0 +1,135 @@ +data "aws_caller_identity" "current" {} + +data "aws_acm_certificate" "cert" { + domain = var.certificate_domain_name + types = ["AMAZON_ISSUED"] + most_recent = true +} + +data "aws_iam_policy_document" "s3_policy" { + statement { + sid = "allowalbaccount" + effect = "Allow" + principals { + identifiers = ["arn:aws:iam::${lookup(var.aws_account_id,var.region,"us-east-1" )}:root"] + type = "AWS" + } + actions = ["s3:PutObject"] + resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}/*"] + } + statement { + sid = "allowalblogdelivery" + effect = "Allow" + principals { + identifiers = ["delivery.logs.amazonaws.com"] + type = "Service" + } + actions = ["s3:PutObject"] + resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}/*"] + condition { + test = "StringEquals" + values = ["bucket-owner-full-control"] + variable = "s3:x-amz-acl" + } + } + statement { + sid = "awslogdeliveryacl" + effect = "Allow" + actions = ["s3:GetBucketAcl"] + resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}"] + principals { + identifiers = ["delivery.logs.amazonaws.com"] + type = "Service" + } + } +} + +data "aws_iam_policy_document" "ecr_policy_doc" { + + statement { + sid = 
"ElasticContainerRegistryPushAndPull" + effect = "Allow" + + principals { + identifiers = [local.account_arn] + type = "AWS" + } + actions = [ + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:PutImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + ] + } +} + +data "aws_iam_policy_document" "task_execution_policy" { + statement { + actions = ["sts:AssumeRole"] + effect = "Allow" + principals { + identifiers = ["ecs-tasks.amazonaws.com"] + type = "Service" + } + } +} + + +data "aws_iam_policy_document" "task_logs_policy" { + statement { + actions = [ + "logs:CreateLogGroup" + ] + effect = "Allow" + resources = [ + "arn:aws:logs:*:*:*" + ] + } +} + +data "aws_iam_policy_document" "ecs_policy_doc" { + statement { + effect = "Allow" + actions = ["ecs:*"] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "ecr:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "ssm:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "s3:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "es:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "elasticache:*" + ] + resources = ["*"] + } + +} \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/ecr.tf b/terraform/deployed/bento/ecs/ecr.tf new file mode 100644 index 000000000..aed5781fc --- /dev/null +++ b/terraform/deployed/bento/ecs/ecr.tf @@ -0,0 +1,39 @@ +resource "aws_ecr_repository" "ecr" { + for_each = toset(var.app_ecr_registry_names) + name = "${lower(var.stack_name)}-${each.key}" + image_tag_mutability = "MUTABLE" + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,terraform.workspace,"ecr-registry") + }, + var.tags, + ) +} + +resource "aws_ecr_repository_policy" "ecr_policy" { + for_each = toset(var.app_ecr_registry_names) + repository = aws_ecr_repository.ecr[each.key].name + 
policy = data.aws_iam_policy_document.ecr_policy_doc.json +} + +resource "aws_ecr_lifecycle_policy" "ecr_life_cycle" { + for_each = toset(var.app_ecr_registry_names) + repository = aws_ecr_repository.ecr[each.key].name + + policy = jsonencode({ + rules = [{ + rulePriority = 1 + description = "keep last 20 images" + action = { + type = "expire" + } + selection = { + tagStatus = "any" + countType = "imageCountMoreThan" + countNumber = 15 + } + }] + }) +} + + diff --git a/terraform/deployed/bento/ecs/ecs.tf b/terraform/deployed/bento/ecs/ecs.tf new file mode 100644 index 000000000..e86fd496e --- /dev/null +++ b/terraform/deployed/bento/ecs/ecs.tf @@ -0,0 +1,326 @@ +#create ecs cluster +resource "aws_appautoscaling_target" "frontend_target" { + max_capacity = 5 + min_capacity = 1 + resource_id = "service/${aws_ecs_cluster.ecs_cluster.name}/${aws_ecs_service.ecs_service_frontend.name}" + scalable_dimension = "ecs:service:DesiredCount" + service_namespace = "ecs" +} + +resource "aws_appautoscaling_target" "backend_target" { + max_capacity = 5 + min_capacity = 1 + resource_id = "service/${aws_ecs_cluster.ecs_cluster.name}/${aws_ecs_service.ecs_service_backend.name}" + scalable_dimension = "ecs:service:DesiredCount" + service_namespace = "ecs" +} + +resource "aws_appautoscaling_policy" "backend_scaling_cpu" { + name = "cpu-autoscaling" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.backend_target.resource_id + scalable_dimension = aws_appautoscaling_target.backend_target.scalable_dimension + service_namespace = aws_appautoscaling_target.backend_target.service_namespace + + target_tracking_scaling_policy_configuration { + predefined_metric_specification { + predefined_metric_type = "ECSServiceAverageCPUUtilization" + } + + target_value = 80 + } +} + + +resource "aws_appautoscaling_policy" "frontend_scaling_cpu" { + name = "cpu-autoscaling" + policy_type = "TargetTrackingScaling" + resource_id = 
aws_appautoscaling_target.frontend_target.resource_id + scalable_dimension = aws_appautoscaling_target.frontend_target.scalable_dimension + service_namespace = aws_appautoscaling_target.frontend_target.service_namespace + + target_tracking_scaling_policy_configuration { + predefined_metric_specification { + predefined_metric_type = "ECSServiceAverageCPUUtilization" + } + + target_value = 80 + } +} + + +resource "aws_ecs_cluster" "ecs_cluster" { + name = "${var.stack_name}-${terraform.workspace}-ecs" + + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"ecs-cluster") + }, + var.tags, + ) + +} + + resource "aws_ecs_service" "ecs_service_backend" { + name = "${var.stack_name}-${terraform.workspace}-backend" + cluster = aws_ecs_cluster.ecs_cluster.id + task_definition = aws_ecs_task_definition.backend.arn + desired_count = var.number_container_replicas + launch_type = var.ecs_launch_type + scheduling_strategy = var.ecs_scheduling_strategy + deployment_minimum_healthy_percent = 50 + deployment_maximum_percent = 200 + network_configuration { + security_groups = [aws_security_group.app_sg.id] + subnets = var.private_subnets + assign_public_ip = false + } + load_balancer { + target_group_arn = aws_lb_target_group.backend_target_group.arn + container_name = "backend" + container_port = var.backend_container_port + } + lifecycle { + ignore_changes = [task_definition, desired_count] + } + } + +resource "aws_ecs_service" "ecs_service_frontend" { + name = "${var.stack_name}-${terraform.workspace}-frontend" + cluster = aws_ecs_cluster.ecs_cluster.id + task_definition = aws_ecs_task_definition.frontend.arn + desired_count = var.number_container_replicas + launch_type = var.ecs_launch_type + scheduling_strategy = var.ecs_scheduling_strategy + deployment_minimum_healthy_percent = 50 + deployment_maximum_percent = 200 + network_configuration { + security_groups = [aws_security_group.app_sg.id] + subnets = var.private_subnets + assign_public_ip = false + } + load_balancer { 
+ target_group_arn = aws_lb_target_group.frontend_target_group.arn + container_name = "frontend" + container_port = var.frontend_container_port + } + lifecycle { + ignore_changes = [task_definition, desired_count] + } +} + +resource "aws_ecs_task_definition" "frontend" { + family = "${var.stack_name}-${terraform.workspace}-frontend" + network_mode = var.ecs_network_mode + requires_compatibilities = ["FARGATE"] + cpu = "256" + memory = "512" + execution_role_arn = aws_iam_role.task_execution_role.arn + task_role_arn = aws_iam_role.task_role.arn + container_definitions = jsonencode([ + { + name = "frontend" + image = "${var.frontend_container_image_name}:latest" + essential = true + portMappings = [ + { + protocol = "tcp" + containerPort = var.frontend_container_port + hostPort = var.frontend_container_port + } + ] + }]) + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,terraform.workspace,"task-definition") + }, + var.tags, + ) +} + + +resource "aws_ecs_task_definition" "backend" { + family = "${var.stack_name}-${terraform.workspace}-backend" + network_mode = var.ecs_network_mode + requires_compatibilities = ["FARGATE"] + cpu = "512" + memory = "1024" + execution_role_arn = aws_iam_role.task_execution_role.arn + task_role_arn = aws_iam_role.task_role.arn + container_definitions = jsonencode([ + { + name = "backend" + image = "${var.backend_container_image_name}:latest" + essential = true + portMappings = [ + { + protocol = "tcp" + containerPort = var.backend_container_port + hostPort = var.backend_container_port + } + ] + }]) + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,terraform.workspace,"task-definition") + }, + var.tags, + ) +} + +resource "aws_security_group" "app_sg" { + name = "${var.stack_name}-${terraform.workspace}-app-sg" + vpc_id = var.vpc_id + tags = merge( + { + "Name" = format("%s-%s-frontend-sg",var.stack_name,terraform.workspace), + }, + var.tags, + ) +} + +resource "aws_security_group_rule" "inbound_frontend_alb" { 
+ from_port = var.frontend_container_port + protocol = local.tcp_protocol + to_port = var.frontend_container_port + security_group_id = aws_security_group.app_sg.id + source_security_group_id = aws_security_group.alb-sg.id + type = "ingress" +} + +resource "aws_security_group_rule" "inbound_backend_alb" { + from_port = var.backend_container_port + protocol = local.tcp_protocol + to_port = var.backend_container_port + security_group_id = aws_security_group.app_sg.id + source_security_group_id = aws_security_group.alb-sg.id + type = "ingress" +} + +resource "aws_security_group_rule" "all_outbound_frontend" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + security_group_id = aws_security_group.app_sg.id + type = "egress" +} + +#create alb target group +resource "aws_lb_target_group" "frontend_target_group" { + name = "${var.stack_name}-${terraform.workspace}-frontend" + port = var.frontend_container_port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = var.alb_target_type + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = "/" + protocol = "HTTP" + matcher = "200" + interval = 15 + timeout = 3 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"frontend-alb-target-group") + }, + var.tags, + ) +} + +#create alb target group +resource "aws_lb_target_group" "backend_target_group" { + name = "${var.stack_name}-${terraform.workspace}-backend" + port = var.backend_container_port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = var.alb_target_type + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = "/ping" + protocol = "HTTP" + port = var.backend_container_port + matcher = "200" + interval = 15 + timeout = 3 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" = 
format("%s-%s",var.stack_name,"backend-alb-target") + }, + var.tags, + ) +} + +resource "aws_lb_listener_rule" "frontend_alb_listener_prod" { + listener_arn = aws_lb_listener.listener_https.arn + priority = var.fronted_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = [local.app_url] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + +resource "aws_lb_listener_rule" "bento_www" { + count = terraform.workspace == "prod" ? 1:0 + listener_arn = aws_lb_listener.listener_https.arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.frontend_target_group.arn + } + + condition { + host_header { + values = ["www.${local.app_url}"] + } + } + condition { + path_pattern { + values = ["/*"] + } + } +} + +resource "aws_lb_listener_rule" "backend_alb_listener_prod" { + listener_arn = aws_lb_listener.listener_https.arn + priority = var.backend_rule_priority + action { + type = "forward" + target_group_arn = aws_lb_target_group.backend_target_group.arn + } + + condition { + host_header { + values = [local.app_url] + } + } + condition { + path_pattern { + values = ["/v1/graphql/*"] + } + } +} diff --git a/terraform/deployed/bento/ecs/locals.tf b/terraform/deployed/bento/ecs/locals.tf new file mode 100644 index 000000000..ad4f82745 --- /dev/null +++ b/terraform/deployed/bento/ecs/locals.tf @@ -0,0 +1,11 @@ +locals { + alb_s3_bucket_name = "${var.stack_name}-alb-${terraform.workspace}-access-logs" + http_port = 80 + any_port = 0 + any_protocol = "-1" + tcp_protocol = "tcp" + https_port = "443" + all_ips = ["0.0.0.0/0"] + account_arn = format("arn:aws:iam::%s:root", data.aws_caller_identity.current.account_id) + app_url = "${var.app_sub_domain}.${var.domain_name}" +} diff --git a/terraform/deployed/bento/ecs/outputs.tf b/terraform/deployed/bento/ecs/outputs.tf new file mode 100644 index 000000000..b46ca9800 --- /dev/null +++ 
b/terraform/deployed/bento/ecs/outputs.tf @@ -0,0 +1,15 @@ +output "alb_dns_name" { + value = aws_lb.alb.dns_name + description = "ALB dns name" +} +output "alb_security_group_id" { + value = aws_security_group.alb-sg.id +} +output "alb_https_listener_arn" { + description = "https listerner arn" + value = aws_lb_listener.listener_https.arn +} +output "alb_zone_id" { + description = "https listerner arn" + value = aws_lb.alb.zone_id +} \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/provider.tf b/terraform/deployed/bento/ecs/provider.tf new file mode 100644 index 000000000..6b5ac25f1 --- /dev/null +++ b/terraform/deployed/bento/ecs/provider.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "3.67.0" + } + } +} + +provider "aws" { + region = var.region +} \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/roles.tf b/terraform/deployed/bento/ecs/roles.tf new file mode 100644 index 000000000..bd1b0c8e4 --- /dev/null +++ b/terraform/deployed/bento/ecs/roles.tf @@ -0,0 +1,35 @@ +resource "aws_iam_role" "task_execution_role" { + name = "${var.stack_name}-${terraform.workspace}-task-execution-role" + assume_role_policy = data.aws_iam_policy_document.task_execution_policy.json +} + +resource "aws_iam_role" "task_role" { + name = "${var.stack_name}-${terraform.workspace}-task-role" + assume_role_policy = data.aws_iam_policy_document.task_execution_policy.json +} + +resource "aws_iam_role_policy_attachment" "task-execution-role-policy-attachment" { + role = aws_iam_role.task_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" +} + +resource "aws_iam_policy" "ecs_log_policy" { + name = "${var.stack_name}-${terraform.workspace}-log-policy" + policy = data.aws_iam_policy_document.task_logs_policy.json +} + +resource "aws_iam_role_policy_attachment" "task-log-policy-attachment" { + role = aws_iam_role.task_execution_role.name + 
policy_arn = aws_iam_policy.ecs_log_policy.arn +} + + +resource "aws_iam_policy" "ecs_policy" { + name = "${var.stack_name}-${terraform.workspace}-ecs-policy" + policy = data.aws_iam_policy_document.ecs_policy_doc.json +} +resource "aws_iam_policy_attachment" "ecs_policy_attachment" { + name = "${var.stack_name}-${terraform.workspace}-ecs-policy-attachement" + policy_arn = aws_iam_policy.ecs_policy.arn + roles = [aws_iam_role.task_role.name] +} \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/route53.tf b/terraform/deployed/bento/ecs/route53.tf new file mode 100644 index 000000000..38b2fb8a8 --- /dev/null +++ b/terraform/deployed/bento/ecs/route53.tf @@ -0,0 +1,25 @@ + +data "aws_route53_zone" "zone" { + name = var.domain_name +} + +resource "aws_route53_record" "www" { + count = terraform.workspace == "prod" ? 1 : 0 + name = "www" + type = "CNAME" + zone_id = data.aws_route53_zone.zone.zone_id + ttl = "5" + records = [var.domain_name] +} + +resource "aws_route53_record" "lower_tiers_records" { + name = var.app_sub_domain + type = "A" + zone_id = data.aws_route53_zone.zone.zone_id + alias { + evaluate_target_health = false + name = aws_lb.alb.dns_name + zone_id = aws_lb.alb.zone_id + } +} + diff --git a/terraform/deployed/bento/ecs/variables.tf b/terraform/deployed/bento/ecs/variables.tf new file mode 100644 index 000000000..474ef18cd --- /dev/null +++ b/terraform/deployed/bento/ecs/variables.tf @@ -0,0 +1,188 @@ +variable "tags" { + description = "tags to associate with this instance" + type = map(string) +} +variable "stack_name" { + description = "name of the project" + type = string +} +variable "region" { + description = "aws region to deploy" + type = string + default = "us-east-1" +} +variable "profile" { + description = "iam user profile to use" + type = string + default = "default" +} +variable "s3_object_expiration_days" { + description = "number of days for object to live" + type = number + default = 720 +} +variable 
"s3_object_nonactive_expiration_days" { + description = "number of days to retain non active objects" + type = number + default = 90 +} +variable "s3_object_standard_ia_transition_days" { + description = "number of days for an object to transition to standard_ia storage class" + default = 120 + type = number +} +variable "s3_object_glacier_transition_days" { + description = "number of days for an object to transition to glacier storage class" + default = 180 + type = number +} + +variable "alb_name" { + description = "Name for the ALB" + type = string + default = "alb" +} +variable "create_alb" { + description = "choose to create alb or not" + type = bool + default = true +} +variable "lb_type" { + description = "Type of loadbalance" + type = string + default = "application" +} +variable "internal_alb" { + description = "is this alb internal?" + default = false + type = bool +} + +variable "ssl_policy" { + description = "specify ssl policy to use" + default = "ELBSecurityPolicy-2016-08" + type = string +} + +variable "default_message" { + description = "default message response from alb when resource is not available" + default = "The requested resource is not available" +} + +variable "domain_name" { + description = "domain name for the application" + type = string +} + +variable "public_subnets" { + description = "Provide list of public subnets to use in this VPC. Example 10.0.1.0/24,10.0.2.0/24" + default = [] + type = list(string) +} + +variable "private_subnets" { + description = "Provide list private subnets to use in this VPC. 
Example 10.0.10.0/24,10.0.11.0/24" + default = [] + type = list(string) +} + +variable "vpc_id" { + description = "VPC Id to to launch the ALB" + type = string +} + +#added frontend app name to accomodate ppdc-otg and ppdc-otp +variable "app_name" { + description = "it will be either otp or otg" + type = string + default = null +} + +variable "certificate_domain_name" { + description = "domain name for the ssl cert" + type = string +} + +variable "aws_account_id" { + type = map(string) + description = "aws account to allow for alb s3 logging" + default = { + us-east-1 = "127311923021" + } +} + +variable "ecs_launch_type" { + description = "ecs launch type - FARGATE or EC2" + type = string + default = "FARGATE" +} + + +variable "number_container_replicas" { + description = "specify the number of container to run" + type = number + default = 1 +} + +variable "ecs_scheduling_strategy" { + description = "ecs scheduling strategy" + type = string + default = "REPLICA" +} +variable "frontend_container_port" { + description = "port on which the container listens" + type = number +} +variable "backend_container_port" { + description = "port on which the container listens" + type = number +} + +variable "fronted_rule_priority" { + description = "priority number to assign to alb rule" + type = number + default = 101 +} + +variable "backend_rule_priority" { + description = "priority number to assign to alb rule" + type = number + default = 100 +} + +variable "app_sub_domain" { + description = "url of the app" + type = string +} + +variable "ecs_network_mode" { + description = "ecs network mode - bridge,host,awsvpc" + type = string + default = "awsvpc" +} +variable "frontend_container_image_name" { + description = "name of the frontend container image" + type = string +} + +variable "backend_container_image_name" { + description = "name of the frontend container image" + type = string +} + +variable "alb_target_type" { + type = string + description = "type of alb target - ip , 
instance, lambda" + default = "ip" +} +variable "create_app_ecr_registry" { + default = true + description = "create list of registry defined in app_ecr_registry_names" + type = bool +} + +variable "app_ecr_registry_names" { + type = list(string) + description = "names of app ecr regis" + default = ["backend","frontend","auth","files"] +} \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/workspace/cds/cds-dev.tfvars b/terraform/deployed/bento/ecs/workspace/cds/cds-dev.tfvars new file mode 100644 index 000000000..d6327fd59 --- /dev/null +++ b/terraform/deployed/bento/ecs/workspace/cds/cds-dev.tfvars @@ -0,0 +1,24 @@ +public_subnets = [ + "subnet-03bb1c845d35aacc5", + "subnet-0a575f7e0c97cad77" +] +private_subnets = [ + "subnet-09b0c7407416d4730", + "subnet-07d177a4d9df5cd32" +] +vpc_id = "vpc-08f154f94dc8a0e34" +stack_name = "cds" +app_name = "cds" +profile = "icdc" +domain_name = "bento-tools.org" +tags = { + Project = "CDS" + CreatedWith = "Terraform" + POC = "ye.wu@nih.gov" +} +certificate_domain_name = "*.bento-tools.org" +backend_container_port = 8080 +frontend_container_port = 80 +app_sub_domain = "cds-dev" +frontend_container_image_name = "cbiitssrepo/bento-frontend" +backend_container_image_name = "cbiitssrepo/bento-backend" \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/workspace/cds/datacommons-nonprod.tfbackend b/terraform/deployed/bento/ecs/workspace/cds/datacommons-nonprod.tfbackend new file mode 100644 index 000000000..9cd0d15c7 --- /dev/null +++ b/terraform/deployed/bento/ecs/workspace/cds/datacommons-nonprod.tfbackend @@ -0,0 +1,5 @@ + bucket = "bento-terraform-remote-state" + key = "datacommons/terraform.tfstate" + workspace_key_prefix = "env" + region = "us-east-1" + encrypt = true \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/workspace/cds/datacommons-prod.tfbackend b/terraform/deployed/bento/ecs/workspace/cds/datacommons-prod.tfbackend new file mode 100644 index 
000000000..843a252ce --- /dev/null +++ b/terraform/deployed/bento/ecs/workspace/cds/datacommons-prod.tfbackend @@ -0,0 +1,5 @@ + bucket = "bento-terraform-remote-state" + key = "datacommons/ecs/terraform.tfstate" + workspace_key_prefix = "env" + region = "us-east-1" + encrypt = true \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/workspace/gmb/datacommons-nonprod.tfbackend b/terraform/deployed/bento/ecs/workspace/gmb/datacommons-nonprod.tfbackend new file mode 100644 index 000000000..9cd0d15c7 --- /dev/null +++ b/terraform/deployed/bento/ecs/workspace/gmb/datacommons-nonprod.tfbackend @@ -0,0 +1,5 @@ + bucket = "bento-terraform-remote-state" + key = "datacommons/terraform.tfstate" + workspace_key_prefix = "env" + region = "us-east-1" + encrypt = true \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/workspace/gmb/datacommons-prod.tfbackend b/terraform/deployed/bento/ecs/workspace/gmb/datacommons-prod.tfbackend new file mode 100644 index 000000000..843a252ce --- /dev/null +++ b/terraform/deployed/bento/ecs/workspace/gmb/datacommons-prod.tfbackend @@ -0,0 +1,5 @@ + bucket = "bento-terraform-remote-state" + key = "datacommons/ecs/terraform.tfstate" + workspace_key_prefix = "env" + region = "us-east-1" + encrypt = true \ No newline at end of file diff --git a/terraform/deployed/bento/ecs/workspace/gmb/gmb-dev.tfvars b/terraform/deployed/bento/ecs/workspace/gmb/gmb-dev.tfvars new file mode 100644 index 000000000..97bc47afa --- /dev/null +++ b/terraform/deployed/bento/ecs/workspace/gmb/gmb-dev.tfvars @@ -0,0 +1,24 @@ +public_subnets = [ + "subnet-03bb1c845d35aacc5", + "subnet-0a575f7e0c97cad77" +] +private_subnets = [ + "subnet-09b0c7407416d4730", + "subnet-07d177a4d9df5cd32" +] +vpc_id = "vpc-08f154f94dc8a0e34" +stack_name = "gmb" +app_name = "gmb" +profile = "icdc" +domain_name = "bento-tools.org" +tags = { + Project = "GMB" + CreatedWith = "Terraform" + POC = "ye.wu@nih.gov" +} +certificate_domain_name = "*.bento-tools.org" 
+backend_container_port = 8080 +frontend_container_port = 80 +app_sub_domain = "cds-dev" +frontend_container_image_name = "cbiitssrepo/bento-frontend" +backend_container_image_name = "cbiitssrepo/bento-backend" \ No newline at end of file diff --git a/terraform/deployed/bento/elasticsearch/backend.tf b/terraform/deployed/bento/elasticsearch/backend.tf new file mode 100644 index 000000000..cde57e0d6 --- /dev/null +++ b/terraform/deployed/bento/elasticsearch/backend.tf @@ -0,0 +1,4 @@ +terraform { + backend "s3" { + } +} diff --git a/terraform/deployed/bento/elasticsearch/data.tf b/terraform/deployed/bento/elasticsearch/data.tf new file mode 100644 index 000000000..c3dd4ca02 --- /dev/null +++ b/terraform/deployed/bento/elasticsearch/data.tf @@ -0,0 +1,5 @@ +data "aws_region" "region" {} + +data "aws_caller_identity" "caller" {} + + diff --git a/terraform/deployed/bento/elasticsearch/main.tf b/terraform/deployed/bento/elasticsearch/main.tf new file mode 100644 index 000000000..9bf30db62 --- /dev/null +++ b/terraform/deployed/bento/elasticsearch/main.tf @@ -0,0 +1,101 @@ +locals { + http_port = 80 + any_port = 0 + any_protocol = "-1" + tcp_protocol = "tcp" + https_port = "443" + all_ips = ["0.0.0.0/0"] + domain_name = "${var.stack_name}-${terraform.workspace}-elasticsearch" +} + +resource "aws_security_group" "es" { + name = "${var.stack_name}-${terraform.workspace}-elasticsearch-sg" + vpc_id = var.vpc_id + + ingress { + from_port = local.https_port + to_port = local.https_port + protocol = local.tcp_protocol + cidr_blocks = var.subnet_ip_block + } +} + +resource "aws_security_group_rule" "all_outbound" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.es.id + type = "egress" +} + +resource "aws_iam_service_linked_role" "es" { + count = var.create_es_service_role ? 
1: 0 + aws_service_name = "es.amazonaws.com" +} + +resource "aws_elasticsearch_domain" "es" { + domain_name = local.domain_name + elasticsearch_version = var.elasticsearch_version + vpc_options { + subnet_ids = var.private_subnet_ids + security_group_ids = [aws_security_group.es.id] + } + + ebs_options { + ebs_enabled = true + volume_size = 120 + } + + access_policies = < + + + + + + + + + + + + + + + + + + + + + + + + + + 1589979804113 + + + + + + \ No newline at end of file diff --git a/terraform/experiment/bento/ecs/.idea/workspace.xml b/terraform/experiment/bento/ecs/.idea/workspace.xml new file mode 100644 index 000000000..6713891b7 --- /dev/null +++ b/terraform/experiment/bento/ecs/.idea/workspace.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + 1589982029292 + + + + + + \ No newline at end of file diff --git a/terraform/experiment/bento/ecs/acm.tf b/terraform/experiment/bento/ecs/acm.tf new file mode 100644 index 000000000..0a1c762c6 --- /dev/null +++ b/terraform/experiment/bento/ecs/acm.tf @@ -0,0 +1,5 @@ +#fetch essential-dev cets +data "aws_acm_certificate" "certificate" { + domain = join(".",["*",var.domain]) + types = ["AMAZON_ISSUED"] +} \ No newline at end of file diff --git a/terraform/experiment/bento/ecs/alb.tf b/terraform/experiment/bento/ecs/alb.tf new file mode 100644 index 000000000..b67987bee --- /dev/null +++ b/terraform/experiment/bento/ecs/alb.tf @@ -0,0 +1,73 @@ +#create s3 bucket for logs + +# data "aws_elb_service_account" "icdc" {} + +# module "alb_s3_bucket_log" { +# source = "../../modules/s3_logs" +# bucket_name = "${var.stack_name}-alb-log" +# account_arn = "${data.aws_elb_service_account.icdc.arn}" +# } + +#Create ALB +resource "aws_alb" "alb" { + name = "${var.stack_name}-alb" + subnets = [data.terraform_remote_state.network.outputs.public_subnet_a_id, data.terraform_remote_state.network.outputs.public_subnet_c_id] + + security_groups = [data.terraform_remote_state.network.outputs.public_security_id] + 
internal = false + idle_timeout = var.alb_idle_timeout + load_balancer_type = "application" + enable_deletion_protection = false + tags = { + Name = "${var.stack_name}-alb" + } + # access_logs { + # bucket = "${module.alb_s3_bucket_log.s3_bucket_name}" + # prefix = "${var.stack_name}-alb" + # enabled = true + # } +} + +#create https redirect +resource "aws_alb_listener" "alb_listener_redirect_https" { + load_balancer_arn = aws_alb.alb.arn + port = "80" + protocol = "HTTP" + + default_action { + type = "redirect" + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +# data "aws_acm_certificate" "certificate" { +# domain = "*.essential-dev.com" +# types = ["AMAZON_ISSUED"] +# } + +resource "aws_lb_listener" "alb_listener_https" { + load_balancer_arn = aws_alb.alb.arn + port = "443" + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-2016-08" + certificate_arn = data.aws_acm_certificate.certificate.arn + + default_action { + type = "fixed-response" + + fixed_response { + content_type = "text/plain" + message_body = "ICDC page is in maintenance ..." + status_code = "200" + } + } +} + +output "alb_dns_name" { + description = "The DNS name of the load balancer." 
+ value = aws_alb.alb.dns_name +} diff --git a/terraform/experiment/bento/ecs/backend.tf b/terraform/experiment/bento/ecs/backend.tf new file mode 100644 index 000000000..51e30a3ec --- /dev/null +++ b/terraform/experiment/bento/ecs/backend.tf @@ -0,0 +1,10 @@ +terraform { + backend "s3" { + bucket = "icdc-sandbox-terraform-state" + key = "state/terraform.tfstate" + dynamodb_table = "icdc-sandbox-terraform-state-lock" + encrypt = "true" + region = "us-east-1" + } +} + diff --git a/terraform/experiment/bento/ecs/cloudfront.tf b/terraform/experiment/bento/ecs/cloudfront.tf new file mode 100644 index 000000000..f255b0dc6 --- /dev/null +++ b/terraform/experiment/bento/ecs/cloudfront.tf @@ -0,0 +1,88 @@ +# locals { +# alb_origin_id = "cloudfront_alb_origin" +# } +# resource "aws_cloudfront_distribution" "site_distribution" { + +# origin { + +# custom_origin_config { +# http_port = 80 +# https_port = 443 +# origin_protocol_policy = "http-only" +# origin_ssl_protocols = ["TLSv1.2"] +# } + +# domain_name = aws_s3_bucket.s3-site.website_endpoint +# origin_id = join(".",[var.site,var.domain]) +# } + +# origin { +# domain_name = aws_alb.alb.dns_name +# origin_id = local.alb_origin_id +# custom_origin_config { +# http_port = 80 +# https_port = 443 +# origin_protocol_policy = "https-only" +# origin_ssl_protocols = ["TLSv1.2"] +# } +# } + +# enabled = true +# default_root_object = var.index_document +# aliases = [join(".",[var.site,var.domain])] +# is_ipv6_enabled = true + +# default_cache_behavior { +# viewer_protocol_policy = "redirect-to-https" +# compress = true +# allowed_methods = ["GET", "HEAD", "OPTIONS"] +# cached_methods = ["GET", "HEAD", "OPTIONS"] + +# target_origin_id = join(".",[var.site,var.domain]) +# min_ttl = 0 +# default_ttl = 86400 +# max_ttl = 86400 + +# forwarded_values { +# query_string = false +# headers = ["*"] +# cookies { +# forward = "none" +# } +# } +# } + +# # Cache behavior +# ordered_cache_behavior { +# path_pattern = "/api/*" +# allowed_methods = 
["DELETE","GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] +# cached_methods = ["GET", "HEAD"] +# target_origin_id = local.alb_origin_id +# forwarded_values { +# query_string = true +# headers = ["*"] +# cookies { +# forward = "all" +# } +# } +# default_ttl = 0 +# min_ttl = 0 +# max_ttl = 0 +# compress = true +# viewer_protocol_policy = "redirect-to-https" +# } + +# restrictions { +# geo_restriction { +# restriction_type = "none" +# } +# } + +# viewer_certificate { +# acm_certificate_arn = data.aws_acm_certificate.certificate.arn +# ssl_support_method = "sni-only" +# } +# tags = { +# ProvisionedBy = "Terraform" +# } +# } \ No newline at end of file diff --git a/terraform/experiment/bento/ecs/cluster.tf b/terraform/experiment/bento/ecs/cluster.tf new file mode 100644 index 000000000..b5dd9b719 --- /dev/null +++ b/terraform/experiment/bento/ecs/cluster.tf @@ -0,0 +1,3 @@ +resource "aws_ecs_cluster" "ecs-cluster" { + name = var.ecs_cluster_name +} \ No newline at end of file diff --git a/terraform/experiment/bento/ecs/ecs.tf b/terraform/experiment/bento/ecs/ecs.tf new file mode 100644 index 000000000..cc16f38f4 --- /dev/null +++ b/terraform/experiment/bento/ecs/ecs.tf @@ -0,0 +1,208 @@ +data "aws_ssm_parameter" "vault_password" { + name = "vault_password" + depends_on = [aws_ssm_parameter.vault] +} + +data "template_cloudinit_config" "ecs_userdata" { + gzip = false + base64_encode = false + part { + content = < ./ctn/mypassword", - "ansible-playbook -i ./ctn/hosts --vault-password-file ./ctn/mypassword ./ctn/k9dc.yml -e env=\"${var.environment}\" -e access_id=\"${data.aws_ssm_parameter.sumologic_accessid.value}\" -e access_key=\"${data.aws_ssm_parameter.sumologic_accesskey.value}\"" - ] - } - ] - } - } -} -DOC -} - -resource "aws_ssm_association" "k9dc" { - name = "${aws_ssm_document.k9dc.name}" - targets { - key = "tag:aws:autoscaling:groupName" - values = ["k9dc"] - } - output_location { - s3_bucket_name = "icdc-sandbox-runcmd" - } -} - -resource "aws_ssm_document" 
"neo4j" { - name = "bootstrap_neo4j" - document_type = "Command" - - content = < ./ctn/mypassword", - "ansible-playbook -i ./ctn/hosts --vault-password-file ./ctn/mypassword ./ctn/neo4j.yml -e neo4j_password=\"${data.aws_ssm_parameter.neo4j_password.value}\" -e env=\"${var.environment}\" -e access_id=\"${data.aws_ssm_parameter.sumologic_accessid.value}\" -e access_key=\"${data.aws_ssm_parameter.sumologic_accesskey.value}\"" - - ] - } - ] - } - } -} -DOC -} -resource "aws_ssm_association" "neo4j" { - name = "${aws_ssm_document.neo4j.name}" - targets { - key = "tag:aws:autoscaling:groupName" - values = ["neo4j"] - } - output_location { - s3_bucket_name = "icdc-sandbox-runcmd" - } - -} \ No newline at end of file diff --git a/terraform/icdc/dev/k9dc/outputs.tf b/terraform/icdc/dev/k9dc/outputs.tf deleted file mode 100644 index 88287091d..000000000 --- a/terraform/icdc/dev/k9dc/outputs.tf +++ /dev/null @@ -1,17 +0,0 @@ -# output "tomcat_ip" { -# value = "${element(aws_instance.k9dc.*.private_ip,0)}" -# } -# output "tomcat_ip_1" { -# value = "${element(aws_instance.k9dc.*.private_ip,1)}" -# } - -output "tomcat01_ip" { - value = "${element(data.aws_instances.k9dc.private_ips,0)}" -} -output "tomcat02_ip" { - value = "${element(data.aws_instances.k9dc.private_ips,1)}" -} - -output "neo4j_ip" { - value = "${data.aws_instance.neo4j.private_ip}" -} diff --git a/terraform/icdc/dev/k9dc/variables.tf b/terraform/icdc/dev/k9dc/variables.tf deleted file mode 100644 index af869277d..000000000 --- a/terraform/icdc/dev/k9dc/variables.tf +++ /dev/null @@ -1,141 +0,0 @@ -variable "profile" { - description = "Profile for launching vm" - default = "icdc" -} - -variable "region" { - description = "Region to provision resources" - default = "us-east-1" -} - - -variable "insecure_no_strict_host_key_checking" { - default = false -} -variable "jenkins_home" { - default = "/local/jenkins" -} - -variable "k9dc_home" { - default = "/local/k9dc" -} -variable "docker_home" { - default = 
"/local/docker" -} -variable "app_name" { - default = "ICDC" -} - - -variable "k9dc_instance_type" { - default = "t2.medium" -} - -variable "neo4j_instance_type" { - default = "t2.medium" -} -variable "neo4j_home" { - default = "/local/neo4j" -} -variable "insecure_bastion_no_strict_host_key_checking" { - default = false -} -variable "neo4j_version" { - default = "3.5.0.3" -} -variable "health_check" { - type = "map" - default = { - k9dc = "/" - neo4j = "/" - } -} - - -variable "forward_protocol_k9dc" { - default = { - k9dc = "HTTP" - } -} - -variable "forward_protocol_neo4j" { - default = { - neo4j = "HTTPS" - } -} -variable "forward_protocol_bolt" { - default = { - neo4j = "HTTPS" - } -} -variable "listener_port_k9dc" { - default = { - ssl = 443 - } -} -variable "listener_port_neo4j" { - default = { - ssl = 7473 - } -} - -variable "listener_port_bolt" { - default = { - ssl = 7687 - } -} -variable "alb_rules_k9dc" { - type = "map" - default = { - k9dc = 80 - } -} - -variable "alb_rules_neo4j" { - type = "map" - default = { - neo4j = 7473 - } -} -variable "alb_rules_bolt" { - type = "map" - default = { - bolt = 7687 - } -} -variable "health_check_k9dc" { - default = { - port = 80 - path = "/" - } -} -variable "health_check_neo4j" { - default = { - port = 7474 - path = "/" - } -} - -variable "health_check_bolt" { - default = { - port = 7474 - path = "/" - } -} - -variable "k9dc_name" { - default = "k9dc" -} -variable "neo4j_name" { - default = "neo4j" -} -variable "domain" { - default = "essential-dev.com" -} -variable "extra_userdata_merge" { - default = "list(append)+dict(recurse_array)+str()" - description = "Control how cloud-init merges user-data sections" -} -variable "environment" { - default = "sandbox" -} diff --git a/terraform/icdc/modules/alb/main.tf b/terraform/icdc/modules/alb/main.tf deleted file mode 100644 index 07bbab285..000000000 --- a/terraform/icdc/modules/alb/main.tf +++ /dev/null @@ -1,50 +0,0 @@ - - -#Create ALB -resource "aws_alb" "alb" { - 
name = "${var.alb_name}" - subnets = ["${var.alb_subnets}"] - security_groups = ["${var.alb_security_groups}"] - internal = "${var.internal_alb}" - idle_timeout = "${var.idle_timeout}" - load_balancer_type = "application" - enable_deletion_protection = false - tags { - Name = "${var.alb_name}" - } - access_logs { - bucket = "${var.s3_bucket_name}" - prefix = "icdc_alb" - enabled = true - } -} -#create https redirect -resource "aws_alb_listener" "alb_listener_redirect_https" { - load_balancer_arn = "${aws_alb.alb.arn}" - port = "80" - protocol = "HTTP" - - default_action { - type = "redirect" - redirect { - port = "443" - protocol = "HTTPS" - status_code = "HTTP_301" - } - } -} -resource "aws_alb_listener" "alb_listener_redirect_bolt" { - load_balancer_arn = "${aws_alb.alb.arn}" - port = "7474" - protocol = "HTTP" - - default_action { - type = "redirect" - redirect { - port = "7473" - protocol = "HTTPS" - status_code = "HTTP_301" - } - } -} - diff --git a/terraform/icdc/modules/alb/outputs.tf b/terraform/icdc/modules/alb/outputs.tf deleted file mode 100644 index 734325728..000000000 --- a/terraform/icdc/modules/alb/outputs.tf +++ /dev/null @@ -1,9 +0,0 @@ -output "alb_arn" { - value = "${aws_alb.alb.arn}" -} -output "alb_dns" { - value = "${aws_alb.alb.dns_name}" -} -output "alb_zone_id" { - value = "${aws_alb.alb.zone_id}" -} \ No newline at end of file diff --git a/terraform/icdc/playbook/docker.yml b/terraform/icdc/playbook/docker.yml deleted file mode 100644 index 5bdc03399..000000000 --- a/terraform/icdc/playbook/docker.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: setup jenkins server - hosts: all - become: yes - - - roles: - - setup-docker diff --git a/terraform/icdc/playbook/jenkins.yml b/terraform/icdc/playbook/jenkins.yml deleted file mode 100644 index c112d1f81..000000000 --- a/terraform/icdc/playbook/jenkins.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: setup jenkins server - hosts: all - become: yes - - - roles: - - setup-docker - - setup-jenkins 
diff --git a/terraform/icdc/roles/setup-jenkins/templates/docker-compose.yml.j2 b/terraform/icdc/roles/setup-jenkins/templates/docker-compose.yml.j2 deleted file mode 100644 index decd6ba1f..000000000 --- a/terraform/icdc/roles/setup-jenkins/templates/docker-compose.yml.j2 +++ /dev/null @@ -1,37 +0,0 @@ -version: '3.1' -services: - jenkins: - image: k9dc/jenkins - ports: - - 80:8080 - - 5001:5000 - volumes: - - /local/jenkins:/var/jenkins_home - secrets: - - jenkinsAdmin - - bearer - - vdonkor - - neo4j - - sshkey - environment: - - CASC_JENKINS_CONFIG=https://raw.githubusercontent.com/vdonkor/ctn/master/jenkins.yaml - - DOCKER_AGENT_IP={{ docker_agent_ip }} - - TOMCAT01_IP={{ tomcat01_ip }} - - TOMCAT02_IP={{ tomcat02_ip }} - - NEO4J_IP={{ neo4j_ip }} - - SLACK_URL={{ slack_url }} - restart: always - -secrets: - jenkinsAdmin: - file: /local/secret/jenkinsAdmin - vdonkor: - file: /local/secret/vdonkor - bearer: - file: /local/secret/bearer - neo4j: - file: /local/secret/neo4j - sshkey: - file: /local/secret/sshkey - - diff --git a/terraform/modules/backend/s3/main.tf b/terraform/modules/backend/s3/main.tf new file mode 100644 index 000000000..947a20150 --- /dev/null +++ b/terraform/modules/backend/s3/main.tf @@ -0,0 +1,8 @@ +terraform { + backend "s3" { + bucket = var.remote_state_bucket + key = var.remote_state_key + region = "us-east-1" + encrypt = true + } +} diff --git a/terraform/modules/backend/s3/outputs.tf b/terraform/modules/backend/s3/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/terraform/modules/backend/s3/variables.tf b/terraform/modules/backend/s3/variables.tf new file mode 100644 index 000000000..f6948cfea --- /dev/null +++ b/terraform/modules/backend/s3/variables.tf @@ -0,0 +1,7 @@ +variable "remote_state_bucket" { + description = "name of the remote state bucket" + type = string +} +variable "remote_state_key" { + description = "name of the path to the terraform state" +} \ No newline at end of file diff --git 
a/terraform/modules/cloudfront/blocked.py b/terraform/modules/cloudfront/blocked.py new file mode 100644 index 000000000..9cf6234fb --- /dev/null +++ b/terraform/modules/cloudfront/blocked.py @@ -0,0 +1,156 @@ +import datetime +import boto3 +import json +import os +from io import StringIO +from notification import send_blocked_ips_report + +# list holding blocked ips +block_ip_file_name = os.getenv("BLOCK_IP_FILE_NAME") +waf_scope = os.getenv("WAF_SCOPE") +s3_bucket_name = os.getenv("S3_BUCKET_NAME") + +#get previous date +previous_date = datetime.datetime.today() - datetime.timedelta(days=1) + +#temporal file use to reconstruct list of blocked ip +local_blocked_ip_file_name = os.getenv("TMP_FILE_NAME") +#s3 key prefix since files are stored in days +key_prefix = '{:04d}'.format(previous_date.year) + "/" + '{:02d}'.format(previous_date.month) + "/" + '{:02d}'.format(previous_date.day) + +session = boto3.session.Session() +s3 = session.resource('s3') +wafv2 = session.client('wafv2', region_name='us-east-1') + +#get name of waf rule that contains blocked ip list +name_of_blocked_ip_list = os.getenv("IP_SETS_NAME") + + +#this function was used to create a test ip sets. +def create_blocked_ip_list(name, ips: list): + response = wafv2.create_ip_set( + Name=name, + Scope=waf_scope, + Description="List of ip blocked from cloudfront for violating files download limit", + IPAddressVersion="IPV4", + Addresses=ips + ) + return response + +#retrieve list of ips being blocked by waf previously. 
This takes name of the ip sets +def get_blocked_ip_list(name): + id_blocked_ip_list = get_blocked_ip_list_id(name) + response = wafv2.get_ip_set( + Name=name, + Scope=waf_scope, + Id=id_blocked_ip_list + ) + addresses = response['IPSet']['Addresses'] + lock_token = response['LockToken'] + return lock_token,addresses + +#update list of ip sets returned above with ones +def update_blocked_ip_list(name, ip_addresses: list): + id_blocked_ip_list = get_blocked_ip_list_id(name) + lock_token, blocked_ips = get_blocked_ip_list(name) + for i in ip_addresses: + blocked_ips.append(i + '/32') + wafv2.update_ip_set( + Name=name, + Scope=waf_scope, + Id=id_blocked_ip_list, + Addresses=blocked_ips, + LockToken=lock_token + ) + + +#get id of the ip set by passing in the name of the ip set +def get_blocked_ip_list_id(name): + response = wafv2.list_ip_sets( + Scope=waf_scope + ) + ip_sets = response['IPSets'] + for ip_set in ip_sets: + if ip_set['Name'] == name: + return ip_set['Id'] + + +#function to read s3 object, it takes bucket name and key +def read_s3_object(bucket_name, key): + s3_read_object = s3.Object(bucket_name, key) + content = s3_read_object.get()['Body'].read().decode('utf-8') + data = StringIO(content) + return data.readlines() + +# extract blocked ip from s3 object +def read_blocked_ips(bucket_name, key): + ips = [] + file_contents = read_s3_object(bucket_name, key) + for content in file_contents: + content = json.loads(content) + if not content["httpRequest"]["clientIp"]: + continue + ips.append(content["httpRequest"]["clientIp"]) + return ips + +#update list of blocked in key in s3 +def write_blocked_ips(bucket_name, key, blocked_ips: list): + s3_write_object = s3.Object(bucket_name, key) + for ip in blocked_ips: + s3_write_object.put(Body=str(ip).encode('ascii')) + +#helper function to get content of s3 object +def read_current_files(bucket_name, prefix): + s3_files = [] + s3_objects = s3.Bucket(bucket_name) + for s3_object in 
s3_objects.objects.filter(Prefix=prefix): + s3_files.append(s3_object.key) + return s3_files + +#get list of all recently blocked ip that are not in ip sets +def get_newly_blocked_ips(bucket_name, prefix): + blocked_ips = [] + files = read_current_files(bucket_name, prefix) + for s3_key in files: + blocked_ips.extend(read_blocked_ips(s3_bucket_name, s3_key)) + return list(set(blocked_ips)) + +#helper function to process list variable +def remove_item(items: list, item): + for i in items: + if i == item: + items.remove(i) + return items + +#this is the function that processes blocked ips from log stream +def get_all_blocked_ips(bucket_name, key, prefix, file_name): + black_listed_ips = [] + all_blocked_ips = read_s3_object(bucket_name, key) + if all_blocked_ips: + all_blocked_ips = [i.strip() for i in all_blocked_ips if not i == '\n' or i == ''] + new_blocked_ips = get_newly_blocked_ips(bucket_name, prefix) + all_blocked_ips.extend(new_blocked_ips) + for ip in all_blocked_ips: + if all_blocked_ips.count(ip) >= 2: + black_listed_ips.append(ip) + all_blocked_ips = remove_item(all_blocked_ips, ip) + if len(all_blocked_ips) >= 1: + with open(file_name, 'w') as f: + for line in all_blocked_ips: + f.write(line + '\n') + return black_listed_ips + +#upload file to s3 +def upload_blocked_ips(bucket_name, key, file_name): + s3.meta.client.upload_file(file_name, bucket_name, key) + if os.path.exists(file_name): + os.remove(file_name) + +#main lambda entry +def handler(event, context): + waf_blocked_ips = get_all_blocked_ips(s3_bucket_name, block_ip_file_name, key_prefix, local_blocked_ip_file_name) + upload_blocked_ips(s3_bucket_name, block_ip_file_name, local_blocked_ip_file_name) + update_blocked_ip_list(name_of_blocked_ip_list, waf_blocked_ips) + if waf_blocked_ips: + send_blocked_ips_report(waf_blocked_ips) + diff --git a/terraform/modules/cloudfront/cloudwatch.tf b/terraform/modules/cloudfront/cloudwatch.tf new file mode 100644 index 000000000..5593a4ea8 --- /dev/null +++ 
b/terraform/modules/cloudfront/cloudwatch.tf @@ -0,0 +1,62 @@ +resource "aws_cloudwatch_metric_alarm" "cloudfront_alarm" { + for_each = var.alarms + alarm_name = "${var.stack_name}-${var.env}-${each.key}-cloudfront-alarm" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = "5" + metric_name = each.value["name"] + namespace = "AWS/CloudFront" + period = "60" + statistic = "Sum" + threshold = each.value["threshold"] + alarm_description = "CloudFront alarm for ${each.value["name"]}" + insufficient_data_actions = [] + dimensions = { + DistributionId = aws_cloudfront_distribution.bento_distribution.id + Region = "Global" + } + alarm_actions = [aws_sns_topic.cloudfront_alarm_topic.arn] + ok_actions = [aws_sns_topic.cloudfront_alarm_topic.arn] +} + + +resource "aws_sns_topic" "cloudfront_alarm_topic" { + name = "${var.stack_name}-${var.env}-cloudfront-4xx-5xx-errors" + delivery_policy = < 30) + transition { + storage_class = "STANDARD_IA" + days = var.s3_object_standard_ia_transition_days + } + noncurrent_version_transition { + days = var.s3_object_nonactive_expiration_days - 30 > 30 ? 
30 : var.s3_object_nonactive_expiration_days + 30 + storage_class = "STANDARD_IA" + } + } + lifecycle_rule { + id = "expire_objects" + enabled = true + expiration { + days = var.s3_object_expiration_days + } + noncurrent_version_expiration { + days = var.s3_object_nonactive_expiration_days + } + } +} diff --git a/terraform/modules/fargate/data.tf b/terraform/modules/fargate/data.tf new file mode 100644 index 000000000..fba0cb2e7 --- /dev/null +++ b/terraform/modules/fargate/data.tf @@ -0,0 +1,119 @@ +data "aws_caller_identity" "current" {} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + + +data "aws_acm_certificate" "cert" { + domain = var.certificate_domain_name + types = [local.cert_types] + most_recent = true +} + +data "aws_iam_policy_document" "s3_policy" { + statement { + sid = "allowalbaccount" + effect = "Allow" + principals { + identifiers = ["arn:aws:iam::${lookup(var.aws_account_id,var.region,"us-east-1" )}:root"] + type = "AWS" + } + actions = ["s3:PutObject"] + resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}/*"] + } + statement { + sid = "allowalblogdelivery" + effect = "Allow" + principals { + identifiers = ["delivery.logs.amazonaws.com"] + type = "Service" + } + actions = ["s3:PutObject"] + resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}/*"] + condition { + test = "StringEquals" + values = ["bucket-owner-full-control"] + variable = "s3:x-amz-acl" + } + } + statement { + sid = "awslogdeliveryacl" + effect = "Allow" + actions = ["s3:GetBucketAcl"] + resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}"] + principals { + identifiers = ["delivery.logs.amazonaws.com"] + type = "Service" + } + } +} + + +data "aws_iam_policy_document" "task_execution_policy" { + statement { + actions = ["sts:AssumeRole"] + effect = "Allow" + principals { + identifiers = ["ecs-tasks.amazonaws.com"] + type = "Service" + } + } +} + + +data "aws_iam_policy_document" "task_logs_policy" { + statement { + actions = [ + "logs:CreateLogGroup" + ] + effect = "Allow" 
+ resources = [ + "arn:aws:logs:*:*:*" + ] + } +} + +data "aws_iam_policy_document" "ecs_policy_doc" { + statement { + effect = "Allow" + actions = ["ecs:*"] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "ecr:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "ssm:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "s3:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "es:*" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = [ + "elasticache:*" + ] + resources = ["*"] + } + +} \ No newline at end of file diff --git a/terraform/modules/fargate/ecs.tf b/terraform/modules/fargate/ecs.tf new file mode 100644 index 000000000..94a955996 --- /dev/null +++ b/terraform/modules/fargate/ecs.tf @@ -0,0 +1,213 @@ +#task definition +resource "aws_ecs_task_definition" "task" { + for_each = var.microservices + family = "${var.stack_name}-${var.env}-${each.value.name}" + network_mode = var.ecs_network_mode + requires_compatibilities = ["FARGATE"] + cpu = each.value.cpu + memory = each.value.memory + execution_role_arn = aws_iam_role.task_execution_role.arn + task_role_arn = aws_iam_role.task_role.arn + container_definitions = jsonencode([ + { + name = each.value.name + image = each.value.image_url + essential = true + portMappings = [ + { + protocol = "tcp" + containerPort = each.value.port +# hostPort = var.microservice_port + } + ] + }]) + tags = merge( + { + "Name" = format("%s-%s-%s-%s",var.stack_name,var.env,each.value.name,"task-definition") + }, + var.tags, + ) +} +#ecs service +resource "aws_ecs_service" "service" { + for_each = var.microservices + name = "${var.stack_name}-${var.env}-${each.value.name}" + cluster = aws_ecs_cluster.ecs_cluster.id + task_definition = aws_ecs_task_definition.task[each.key].arn + desired_count = var.number_container_replicas + launch_type = var.ecs_launch_type + scheduling_strategy = 
var.ecs_scheduling_strategy + deployment_minimum_healthy_percent = 50 + deployment_maximum_percent = 200 + deployment_circuit_breaker { + enable = true + rollback = true + } + network_configuration { + security_groups = [aws_security_group.app_sg.id,aws_security_group.fargate_sg.id] + subnets = var.private_subnet_ids + assign_public_ip = false + } + load_balancer { + target_group_arn = aws_lb_target_group.target_group[each.key].arn + container_name = each.value.name + container_port = each.value.port + } + lifecycle { + ignore_changes = [task_definition, desired_count] + } +} + + +#create ecs cluster +resource "aws_appautoscaling_target" "microservice_autoscaling_target" { + for_each = var.microservices + max_capacity = 5 + min_capacity = 1 + resource_id = "service/${aws_ecs_cluster.ecs_cluster.name}/${aws_ecs_service.service[each.key].name}" + scalable_dimension = "ecs:service:DesiredCount" + service_namespace = "ecs" +} + +resource "aws_appautoscaling_policy" "microservice_autoscaling_cpu" { + for_each = var.microservices + name = "cpu-autoscaling" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.microservice_autoscaling_target[each.key].resource_id + scalable_dimension = aws_appautoscaling_target.microservice_autoscaling_target[each.key].scalable_dimension + service_namespace = aws_appautoscaling_target.microservice_autoscaling_target[each.key].service_namespace + + target_tracking_scaling_policy_configuration { + predefined_metric_specification { + predefined_metric_type = "ECSServiceAverageCPUUtilization" + } + + target_value = 80 + } +} + +resource "aws_ecs_cluster" "ecs_cluster" { + name = "${var.stack_name}-${var.env}-ecs" + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"ecs-cluster") + }, + var.tags, + ) + +} + +resource "aws_security_group" "fargate_sg" { + name = "${var.stack_name}-${var.env}-fargate-sg" + vpc_id = var.vpc_id + tags = merge( + { + "Name" = format("%s-%s-fargate-sg",var.stack_name,var.env), + 
}, + var.tags, + ) +} + +resource "aws_security_group_rule" "all_outbound_fargate" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + security_group_id = aws_security_group.fargate_sg.id + type = "egress" +} + +resource "aws_security_group_rule" "inbound_fargate" { + for_each = toset(var.fargate_security_group_ports) + from_port = each.key + protocol = local.tcp_protocol + to_port = each.key + security_group_id = aws_security_group.fargate_sg.id + cidr_blocks = [data.aws_vpc.vpc.cidr_block] + type = "ingress" +} + +resource "aws_security_group" "app_sg" { + name = "${var.stack_name}-${var.env}-app-sg" + vpc_id = var.vpc_id + tags = merge( + { + "Name" = format("%s-%s-frontend-sg",var.stack_name,var.env), + }, + var.tags, + ) +} + +resource "aws_security_group_rule" "inbound_alb" { + for_each = var.microservices + from_port = each.value.port + protocol = local.tcp_protocol + to_port = each.value.port + security_group_id = aws_security_group.app_sg.id + source_security_group_id = aws_security_group.alb-sg.id + type = "ingress" +} + + + +resource "aws_security_group_rule" "all_outbound_frontend" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + security_group_id = aws_security_group.app_sg.id + type = "egress" +} + +#create alb target group +resource "aws_lb_target_group" "target_group" { + for_each = var.microservices + name = "${var.stack_name}-${var.env}-${each.value.name}" + port = each.value.port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = var.alb_target_type + stickiness { + type = "lb_cookie" + cookie_duration = 1800 + enabled = true + } + health_check { + path = each.value.health_check_path + protocol = "HTTP" + matcher = "200" + port = each.value.port + interval = 45 + timeout = 30 + healthy_threshold = 2 + unhealthy_threshold = 2 + } + tags = merge( + { + "Name" = 
format("%s-%s",var.stack_name,"${each.value.name}-alb-target-group") + }, + var.tags, + ) +} + +resource "aws_lb_listener_rule" "alb_listener" { + for_each = var.microservices + listener_arn = aws_lb_listener.listener_https.arn + priority = each.value.priority_rule_number + action { + type = "forward" + target_group_arn = aws_lb_target_group.target_group[each.key].arn + } + + condition { + host_header { + values = [var.microservice_url] + } + } + condition { + path_pattern { + values = [each.value.path] + } + } +} + diff --git a/terraform/modules/fargate/locals.tf b/terraform/modules/fargate/locals.tf new file mode 100644 index 000000000..4562f8697 --- /dev/null +++ b/terraform/modules/fargate/locals.tf @@ -0,0 +1,11 @@ +locals { + alb_s3_bucket_name = var.cloud_platform == "leidos" ? "${var.stack_name}-alb-${var.env}-access-logs" : "${var.stack_name}-${var.cloud_platform}-alb-${var.env}-access-logs" + http_port = 80 + any_port = 0 + any_protocol = "-1" + tcp_protocol = "tcp" + https_port = "443" + all_ips = ["0.0.0.0/0"] + account_arn = format("arn:aws:iam::%s:root", data.aws_caller_identity.current.account_id) + cert_types = var.cloud_platform == "leidos" ? 
"AMAZON_ISSUED" : "IMPORTED" +} diff --git a/terraform/modules/fargate/outputs.tf b/terraform/modules/fargate/outputs.tf new file mode 100644 index 000000000..b46ca9800 --- /dev/null +++ b/terraform/modules/fargate/outputs.tf @@ -0,0 +1,15 @@ +output "alb_dns_name" { + value = aws_lb.alb.dns_name + description = "ALB dns name" +} +output "alb_security_group_id" { + value = aws_security_group.alb-sg.id +} +output "alb_https_listener_arn" { + description = "https listerner arn" + value = aws_lb_listener.listener_https.arn +} +output "alb_zone_id" { + description = "https listerner arn" + value = aws_lb.alb.zone_id +} \ No newline at end of file diff --git a/terraform/modules/fargate/roles.tf b/terraform/modules/fargate/roles.tf new file mode 100644 index 000000000..0b808a438 --- /dev/null +++ b/terraform/modules/fargate/roles.tf @@ -0,0 +1,35 @@ +resource "aws_iam_role" "task_execution_role" { + name = "${var.stack_name}-${var.env}-task-execution-role" + assume_role_policy = data.aws_iam_policy_document.task_execution_policy.json +} + +resource "aws_iam_role" "task_role" { + name = "${var.stack_name}-${var.env}-task-role" + assume_role_policy = data.aws_iam_policy_document.task_execution_policy.json +} + +resource "aws_iam_role_policy_attachment" "task-execution-role-policy-attachment" { + role = aws_iam_role.task_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" +} + +resource "aws_iam_policy" "ecs_log_policy" { + name = "${var.stack_name}-${var.env}-log-policy" + policy = data.aws_iam_policy_document.task_logs_policy.json +} + +resource "aws_iam_role_policy_attachment" "task-log-policy-attachment" { + role = aws_iam_role.task_execution_role.name + policy_arn = aws_iam_policy.ecs_log_policy.arn +} + + +resource "aws_iam_policy" "ecs_policy" { + name = "${var.stack_name}-${var.env}-ecs-policy" + policy = data.aws_iam_policy_document.ecs_policy_doc.json +} +resource "aws_iam_policy_attachment" 
"ecs_policy_attachment" { + name = "${var.stack_name}-${var.env}-ecs-policy-attachement" + policy_arn = aws_iam_policy.ecs_policy.arn + roles = [aws_iam_role.task_role.name] +} \ No newline at end of file diff --git a/terraform/modules/fargate/route53.tf b/terraform/modules/fargate/route53.tf new file mode 100644 index 000000000..2ee92d685 --- /dev/null +++ b/terraform/modules/fargate/route53.tf @@ -0,0 +1,16 @@ +data "aws_route53_zone" "zone" { + count = var.create_dns_record ? 1 : 0 + name = var.domain_name +} + +resource "aws_route53_record" "dns_record" { + count = var.create_dns_record ? 1 : 0 + name = "${var.app_sub_domain}-${var.env}" + type = "A" + zone_id = data.aws_route53_zone.zone[count.index].zone_id + alias { + evaluate_target_health = false + name = aws_lb.alb.dns_name + zone_id = aws_lb.alb.zone_id + } +} diff --git a/terraform/modules/fargate/variables.tf b/terraform/modules/fargate/variables.tf new file mode 100644 index 000000000..22398fce5 --- /dev/null +++ b/terraform/modules/fargate/variables.tf @@ -0,0 +1,202 @@ +variable "tags" { + description = "tags to associate with this instance" + type = map(string) +} +variable "stack_name" { + description = "name of the project" + type = string +} +variable "region" { + description = "aws region to deploy" + type = string + default = "us-east-1" +} +variable "profile" { + description = "iam user profile to use" + type = string + default = "default" +} +variable "s3_object_expiration_days" { + description = "number of days for object to live" + type = number + default = 720 +} +variable "s3_object_nonactive_expiration_days" { + description = "number of days to retain non active objects" + type = number + default = 90 +} +variable "s3_object_standard_ia_transition_days" { + description = "number of days for an object to transition to standard_ia storage class" + default = 120 + type = number +} +variable "s3_object_glacier_transition_days" { + description = "number of days for an object to transition to 
glacier storage class" + default = 180 + type = number +} + +variable "alb_name" { + description = "Name for the ALB" + type = string + default = "alb" +} +variable "create_alb" { + description = "choose to create alb or not" + type = bool + default = true +} +variable "lb_type" { + description = "Type of loadbalance" + type = string + default = "application" +} +variable "internal_alb" { + description = "is this alb internal?" + default = false + type = bool +} + +variable "ssl_policy" { + description = "specify ssl policy to use" + default = "ELBSecurityPolicy-2016-08" + type = string +} + +variable "default_message" { + description = "default message response from alb when resource is not available" + default = "The requested resource is not available" +} + +variable "domain_name" { + description = "domain name for the application" + type = string +} + +variable "public_subnet_ids" { + description = "Provide list of public subnets to use in this VPC. Example 10.0.1.0/24,10.0.2.0/24" + type = list(string) +} + +variable "private_subnet_ids" { + description = "Provide list private subnets to use in this VPC. 
Example 10.0.10.0/24,10.0.11.0/24" + type = list(string) +} + +variable "vpc_id" { + description = "VPC Id to to launch the ALB" + type = string +} + +#added frontend app name to accomodate ppdc-otg and ppdc-otp +variable "app_name" { + description = "it will be either otp or otg" + type = string +} + +variable "certificate_domain_name" { + description = "domain name for the ssl cert" + type = string +} + +variable "aws_account_id" { + type = map(string) + description = "aws account to allow for alb s3 logging" + default = { + us-east-1 = "127311923021" + } +} + +variable "ecs_launch_type" { + description = "ecs launch type - FARGATE or EC2" + type = string + default = "FARGATE" +} + + +variable "number_container_replicas" { + description = "specify the number of container to run" + type = number + default = 1 +} + +variable "ecs_scheduling_strategy" { + description = "ecs scheduling strategy" + type = string + default = "REPLICA" +} + +variable "app_sub_domain" { + description = "url of the app" + type = string + default = null +} + +variable "ecs_network_mode" { + description = "ecs network mode - bridge,host,awsvpc" + type = string + default = "awsvpc" +} + +variable "alb_target_type" { + type = string + description = "type of alb target - ip , instance, lambda" + default = "ip" +} +variable "create_app_ecr_registry" { + default = true + description = "create list of registry defined in app_ecr_registry_names" + type = bool +} + + +variable "microservice_port" { + type = number + description = "port on which microservice listens" + default = 80 +} +variable "microservice_url" { + description = "url of the application" + type = string +} +variable "env" { + description = "name of the environment to provision" + type = string +} +variable "alb_subnets" { + description = "list of subnets to use for the alb" + type = list(string) +} +variable "alb_allowed_ip_range" { + description = "allowed subnet range for alb" + type = list(string) +} +variable 
"create_dns_record" { + type = bool + description = "choose to create dns record" +} +variable "fargate_security_group_ports" { + type = list(string) + description = "list of ports to allow when using ECS" +} +variable "microservices" { + type = map(object({ + name = string + port = number + health_check_path = string + priority_rule_number = number + image_url = string + cpu = number + memory = number + path = string + })) +} +variable "cloud_platform" { + description = "choose platform to use" + type = string +} +variable "create_ecr" { + type = bool + description = "choose to create ecr or not" +} \ No newline at end of file diff --git a/terraform/modules/globals/ecr/main.tf b/terraform/modules/globals/ecr/main.tf new file mode 100644 index 000000000..6f75504a4 --- /dev/null +++ b/terraform/modules/globals/ecr/main.tf @@ -0,0 +1,46 @@ + +locals { + my_account = "${format("arn:aws:iam::%s:root", data.aws_caller_identity.account.account_id)}" +} + +data "aws_caller_identity" "account" { +} + +resource "aws_ecr_repository" "ecr" { + name = var.stack_name + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"ecr-registry") + }, + var.tags, + ) +} + +resource "aws_ecr_repository_policy" "ecr_policy" { + repository = aws_ecr_repository.ecr.name + policy = data.aws_iam_policy_document.ecr_policy_doc.json +} + +data "aws_iam_policy_document" "ecr_policy_doc" { + + statement { + sid = "ElasticContainerRegistryPushAndPull" + effect = "Allow" + + principals { + identifiers = [local.my_account] + type = "AWS" + } + actions = [ + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:PutImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + ] + } +} + + diff --git a/terraform/modules/globals/ecr/outputs.tf b/terraform/modules/globals/ecr/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/terraform/modules/globals/ecr/variables.tf 
b/terraform/modules/globals/ecr/variables.tf new file mode 100644 index 000000000..409f24c01 --- /dev/null +++ b/terraform/modules/globals/ecr/variables.tf @@ -0,0 +1,14 @@ +variable "repo_name" { + type = string + description = "Name of the repository." +} + +variable "stack_name" { + description = "name of the project" + type = string +} +variable "tags" { + description = "tags for the vpc" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/terraform/modules/networks/alb/data.tf b/terraform/modules/networks/alb/data.tf new file mode 100644 index 000000000..bde027d78 --- /dev/null +++ b/terraform/modules/networks/alb/data.tf @@ -0,0 +1,38 @@ +data "aws_caller_identity" "current" {} +//data "aws_iam_policy_document" "s3_policy" { +// statement { +// sid = "allowalbaccount" +// effect = "Allow" +// principals { +// identifiers = ["arn:aws:iam::${lookup(var.aws_account_id,var.region,"us-east-1" )}:root"] +// type = "AWS" +// } +// actions = ["s3:PutObject"] +// resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}/*"] +// } +// statement { +// sid = "allowalblogdelivery" +// effect = "Allow" +// principals { +// identifiers = ["delivery.logs.amazonaws.com"] +// type = "Service" +// } +// actions = ["s3:PutObject"] +// resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}/*"] +// condition { +// test = "StringEquals" +// values = ["bucket-owner-full-control"] +// variable = "s3:x-amz-acl" +// } +// } +// statement { +// sid = "awslogdeliveryacl" +// effect = "Allow" +// actions = ["s3:GetBucketAcl"] +// resources = ["arn:aws:s3:::${local.alb_s3_bucket_name}"] +// principals { +// identifiers = ["delivery.logs.amazonaws.com"] +// type = "Service" +// } +// } +//} diff --git a/terraform/modules/networks/alb/main.tf b/terraform/modules/networks/alb/main.tf new file mode 100644 index 000000000..0a4529f23 --- /dev/null +++ b/terraform/modules/networks/alb/main.tf @@ -0,0 +1,152 @@ +//locals { +// alb_s3_bucket_name = 
"${var.stack_name}-alb-${terraform.workspace}-access-logs" +//} +resource "aws_lb" "alb" { + + name = "${var.stack_name}-${var.alb_name}-${var.env}" + load_balancer_type = var.lb_type + subnets = var.subnets + security_groups = [aws_security_group.alb-sg.id] + +// access_logs { +// bucket = local.alb_s3_bucket_name +// prefix = "alb-logs" +// enabled = true +// } + + timeouts { + create = "10m" + } + + tags = merge( + { + "Name" = format("%s-%s", var.stack_name, var.env) + }, + var.tags, + ) +} + +#create alb security group + +resource "aws_security_group" "alb-sg" { + + name = "${var.stack_name}-${var.frontend_app_name}${var.env}-alb-sg" + vpc_id = var.vpc_id + tags = merge( + { + "Name" = format("%s-%s", var.stack_name, var.env) + }, + var.tags, + ) +} + +resource "aws_security_group_rule" "inbound_http" { + + from_port = local.http_port + protocol = local.tcp_protocol + to_port = local.http_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.alb-sg.id + type = "ingress" +} + +resource "aws_security_group_rule" "inbound_https" { + + from_port = local.https_port + protocol = local.tcp_protocol + to_port = local.https_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.alb-sg.id + type = "ingress" +} + +resource "aws_security_group_rule" "all_outbound" { + + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.alb-sg.id + type = "egress" +} + +#create https redirect +resource "aws_lb_listener" "redirect_https" { + + load_balancer_arn = aws_lb.alb.arn + port = local.http_port + protocol = "HTTP" + default_action { + type = "redirect" + redirect { + port = local.https_port + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +resource "aws_lb_listener" "listener_https" { + + load_balancer_arn = aws_lb.alb.arn + port = local.https_port + protocol = "HTTPS" + ssl_policy = var.ssl_policy + certificate_arn = 
var.certificate_arn + default_action { + type = "fixed-response" + + fixed_response { + content_type = "text/plain" + message_body = var.default_message + status_code = "200" + } + } +} + +locals { + http_port = 80 + any_port = 0 + any_protocol = "-1" + tcp_protocol = "tcp" + https_port = "443" + all_ips = ["0.0.0.0/0"] +} + +//resource "aws_s3_bucket" "alb_logs_bucket" { +// bucket = local.alb_s3_bucket_name +// acl = "private" +// policy = data.aws_iam_policy_document.s3_policy.json +// server_side_encryption_configuration { +// rule { +// apply_server_side_encryption_by_default { +// sse_algorithm = "AES256" +// } +// } +// } +// lifecycle_rule { +// id = "transition_to_standard_ia" +// enabled = (var.s3_object_expiration_days - var.s3_object_standard_ia_transition_days > 30) +// transition { +// storage_class = "STANDARD_IA" +// days = var.s3_object_standard_ia_transition_days +// } +// noncurrent_version_transition { +// days = var.s3_object_nonactive_expiration_days - 30 > 30 ? 30 : var.s3_object_nonactive_expiration_days + 30 +// storage_class = "STANDARD_IA" +// } +// } +// lifecycle_rule { +// id = "expire_objects" +// enabled = true +// expiration { +// days = var.s3_object_expiration_days +// } +// noncurrent_version_expiration { +// days = var.s3_object_nonactive_expiration_days +// } +// } +//} diff --git a/terraform/modules/networks/alb/outputs.tf b/terraform/modules/networks/alb/outputs.tf new file mode 100644 index 000000000..b46ca9800 --- /dev/null +++ b/terraform/modules/networks/alb/outputs.tf @@ -0,0 +1,15 @@ +output "alb_dns_name" { + value = aws_lb.alb.dns_name + description = "ALB dns name" +} +output "alb_security_group_id" { + value = aws_security_group.alb-sg.id +} +output "alb_https_listener_arn" { + description = "https listerner arn" + value = aws_lb_listener.listener_https.arn +} +output "alb_zone_id" { + description = "https listerner arn" + value = aws_lb.alb.zone_id +} \ No newline at end of file diff --git 
a/terraform/modules/networks/alb/variables.tf b/terraform/modules/networks/alb/variables.tf new file mode 100644 index 000000000..99f4e906a --- /dev/null +++ b/terraform/modules/networks/alb/variables.tf @@ -0,0 +1,92 @@ +variable "alb_name" { + description = "Name for the ALB" + type = string + default = "alb" +} +variable "create_alb" { + description = "choose to create alb or not" + type = bool + default = true +} +variable "lb_type" { + description = "Type of loadbalance" + type = string + default = "application" +} +variable "internal_alb" { + description = "is this alb internal?" + default = false + type = bool +} +variable "subnets" { + description = "subnets to associate with this ALB" + type = list(string) +} +variable "tags" { + description = "tags to label this ALB" + type = map(string) + default = {} +} +variable "stack_name" { + description = "Name of the project" + type = string +} +variable "ssl_policy" { + description = "specify ssl policy to use" + default = "ELBSecurityPolicy-2016-08" + type = string +} +variable "default_message" { + description = "default message response from alb when resource is not available" + default = "The request resource is not available" +} +variable "certificate_arn" { + description = "certificate arn to use for the https listner" + type = string +} +variable "vpc_id" { + description = "VPC Id to to launch the ALB" + type = string +} +variable "env" { + description = "environment" + type = string +} + +#added frontend app name to accomodate ppdc-otg and ppdc-otp +variable "frontend_app_name" { + description = "it will be either otp or otg" + type = string + default = "" +} +//variable "s3_object_expiration_days" { +// description = "number of days for object to live" +// type = number +// default = 720 +//} +//variable "s3_object_nonactive_expiration_days" { +// description = "number of days to retain non active objects" +// type = number +// default = 90 +//} +//variable "s3_object_standard_ia_transition_days" { +// 
description = "number of days for an object to transition to standard_ia storage class" +// default = 120 +// type = number +//} +//variable "s3_object_glacier_transition_days" { +// description = "number of days for an object to transition to glacier storage class" +// default = 180 +// type = number +//} +variable "aws_account_id" { + type = map(string) + description = "aws account to allow for alb s3 logging" + default = { + us-east-1 = "127311923021" + } +} +variable "region" { + type = string + description = "AWS region" +} \ No newline at end of file diff --git a/terraform/modules/networks/nlb/main.tf b/terraform/modules/networks/nlb/main.tf new file mode 100644 index 000000000..0c047327a --- /dev/null +++ b/terraform/modules/networks/nlb/main.tf @@ -0,0 +1,70 @@ +resource "aws_lb" "alb" { + name = "${var.stack_name}-${var.nlb_name}" + load_balancer_type = var.lb_type + enable_cross_zone_load_balancing = "true" + subnets = var.subnets + timeouts { + create = "10m" + } + + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,var.nlb_name) + }, + var.tags, + ) +} + +resource "aws_lb_listener" "tcp" { + + load_balancer_arn = aws_lb.alb.arn + port = var.nlb_listener_port + protocol = local.tcp_protocol + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.target.arn + } +} + +resource "aws_lb_target_group" "target" { + name = "${var.stack_name}-nlb-target-group" + port = var.nlb_listener_port + protocol = local.tcp_protocol + vpc_id = var.vpc_id + target_type = "ip" + deregistration_delay = var.deregistration_delay + + health_check { + protocol = local.tcp_protocol + interval = var.health_check_interval + healthy_threshold = 2 + unhealthy_threshold = 2 + } + + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,"ecs-nlb-target") + }, + var.tags, + ) +} + +# get nlb private ips +data "aws_network_interface" "nlb_ips" { + count = length(var.subnets) + + filter { + name = "description" + values = ["ELB ${aws_lb.alb.arn_suffix}"] 
+ } + + filter { + name = "subnet-id" + values = [element(var.subnets, count.index)] + } +} + +locals { + tcp_protocol = "TCP" +} + diff --git a/terraform/modules/networks/nlb/outputs.tf b/terraform/modules/networks/nlb/outputs.tf new file mode 100644 index 000000000..ab103a029 --- /dev/null +++ b/terraform/modules/networks/nlb/outputs.tf @@ -0,0 +1,14 @@ +output "nlb_dns_name" { + value = aws_lb.alb.*.dns_name + description = "ALB dns name" +} +output "nlb_tcp_listener_arn" { + description = "nlb listerner arn" + value = aws_lb_listener.tcp.arn +} +output "nlb_target_group_arn" { + value = aws_lb_target_group.target.arn +} +output "nlb_ips" { + value = flatten(data.aws_network_interface.nlb_ips.*.private_ips) +} \ No newline at end of file diff --git a/terraform/modules/networks/nlb/variables.tf b/terraform/modules/networks/nlb/variables.tf new file mode 100644 index 000000000..e055c4949 --- /dev/null +++ b/terraform/modules/networks/nlb/variables.tf @@ -0,0 +1,56 @@ +variable "nlb_name" { + description = "Name for the ALB" + type = string + default = "nlb" +} +variable "create_alb" { + description = "choose to create alb or not" + type = bool + default = true +} +variable "lb_type" { + description = "Type of loadbalance" + type = string + default = "network" +} +variable "internal_alb" { + description = "is this alb internal?" 
+ default = true + type = bool +} +variable "subnets" { + description = "subnets to associate with this ALB" + type = list(string) +} +variable "tags" { + description = "tags to label this ALB" + type = map(string) + default = {} +} +variable "stack_name" { + description = "Name of the project" + type = string +} + +variable "vpc_id" { + description = "VPC Id to to launch the ALB" + type = string +} +variable "nlb_listener_port" { + description = "load balance port to listen traffic on " + type = number +} +variable "default_message" { + description = "default message on nlb listerner" + default = "This is a NLB listener underconstruction" + type = string +} +variable "deregistration_delay" { + description = "number of seconds it takes to stop sending traffic to instance taken out of service" + default = 90 + type = number +} +variable "health_check_interval" { + description = "how often to check livelininess of the instances" + default = "30" +} \ No newline at end of file diff --git a/terraform/modules/networks/vpc/main.tf b/terraform/modules/networks/vpc/main.tf new file mode 100644 index 000000000..3ff11dfe0 --- /dev/null +++ b/terraform/modules/networks/vpc/main.tf @@ -0,0 +1,213 @@ +#create vpc +resource "aws_vpc" "vpc" { + cidr_block = var.vpc_cidr_block + enable_dns_hostnames = var.enable_hostname_dns + enable_dns_support = var.enable_dns_support + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,var.env,"vpc") + }, + var.tags, + var.custom_vpc_tags, + ) +} +#create internet gateway +resource "aws_internet_gateway" "igw" { + #create igw only if a new vpc is to be created and a public subnet is specified + count = var.create_vpc && length(var.public_subnets) > 0 ? 
1 : 0 + vpc_id = aws_vpc.vpc.id + tags = merge( + { + "Name" = format("%s",var.stack_name) + }, + var.tags, + var.custom_nat_gateway_tags, + ) + +} +#create public route table +resource "aws_route_table" "public_route_table" { + #create public route table only if a new vpc is to be created and a public subnet is specified + count = var.create_vpc && length(var.public_subnets) > 0 ? 1 : 0 + vpc_id = aws_vpc.vpc.id + tags = merge( + { + "Name" = format("%s-public",var.stack_name) + }, + var.tags, + var.custom_public_route_table_tags, + ) +} + +#create public route +resource "aws_route" "public_route_internet" { + #create public route table only if a new vpc is to be created and a public subnet is specified + count = var.create_vpc && length(var.public_subnets) > 0 ? 1 : 0 + route_table_id = aws_route_table.public_route_table[count.index].id + destination_cidr_block = local.all_ips + gateway_id = aws_internet_gateway.igw[count.index].id + + timeouts { + create = "5m" + } +} +#create private route table +resource "aws_route_table" "private_route_table" { + count = var.create_vpc && local.max_subnet_length > 0 ? local.num_of_nat_gateway : 0 + vpc_id = aws_vpc.vpc.id + tags = merge( + { + "Name" = var.single_nat_gateway ? "${var.stack_name}-private" : format("%s-private-%s",var.stack_name,element(var.availability_zones,count.index )), + }, + var.tags, + var.custom_private_route_table_tags, + ) +} + +#create database route table +resource "aws_route_table" "database_route_table" { + count = var.create_vpc && var.create_db_subnet_group && length(var.private_subnets) > 0 ? 1 : 0 + vpc_id = aws_vpc.vpc.id + tags = merge( + { + "Name" = var.single_nat_gateway ? 
"${var.stack_name}-private" : format("%s-private-%s",var.stack_name,element(var.availability_zones,count.index )), + }, + var.tags, + var.custom_db_subnet_group_tags, + ) +} + +#create public subnets +resource "aws_subnet" "public_subnet" { + #use the number of subnets provided + count = var.create_vpc && length(var.public_subnets) > 0 && (!var.one_nat_gateway_per_az || length(var.public_subnets) >= length(var.availability_zones)) ? length(var.public_subnets) : 0 + cidr_block = var.public_subnets[count.index] + availability_zone = element(var.availability_zones,count.index ) + vpc_id = aws_vpc.vpc.id + + tags = merge( + { + "Name" = format("%s-public-%s",var.stack_name,element(var.availability_zones,count.index )), + }, + var.tags, + var.custom_public_subnet_tags, + ) +} +#create private subnets +resource "aws_subnet" "private_subnet" { + #create private subnets provided + count = var.create_vpc && length(var.private_subnets) > 0 ? length(var.private_subnets) : 0 + cidr_block = var.private_subnets[count.index] + availability_zone = element(var.availability_zones,count.index ) + vpc_id = aws_vpc.vpc.id + + tags = merge( + { + "Name" = format("%s-private-%s",var.stack_name,element(var.availability_zones,count.index )), + }, + var.tags, + var.custom_private_subnet_tags, + ) +} +#create database subnet +resource "aws_subnet" "db_subnet" { + #create db subnets provided + count = var.create_vpc && length(var.db_subnets) > 0 ? length(var.db_subnets) : 0 + cidr_block = var.db_subnets[count.index] + availability_zone = element(var.availability_zones,count.index ) + vpc_id = aws_vpc.vpc.id + + tags = merge( + { + "Name" = format("%s-database-%s",var.stack_name,element(var.availability_zones,count.index )), + }, + var.tags, + var.custom_db_subnet_tags, + ) +} +#create db subnet group to use with managed service like RDS +resource "aws_db_subnet_group" "db_subnet_group" { + count = var.create_vpc && length(var.db_subnets) > 0 && var.create_db_subnet_group ? 
1 : 0 + name = "${var.stack_name}-${var.env}-${var.name_db_subnet_group}" + description = "${var.stack_name}-${var.env} database subnet group" + subnet_ids = aws_subnet.db_subnet.*.id + tags = merge( + { + "Name" = format("%s",var.stack_name), + }, + var.tags, + var.custom_db_subnet_tags, + ) +} + +resource "aws_eip" "nat" { + count = var.create_vpc && (var.enable_nat_gateway && !var.reuse_nat_ips) ? local.num_of_nat_gateway : 0 + vpc = true + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,element(var.availability_zones,var.single_nat_gateway ? 0 : count.index )), + }, + var.tags, + var.custom_nat_gateway_tags, + ) +} + +resource "aws_nat_gateway" "aws_nat" { + count = var.create_vpc && var.enable_nat_gateway ? local.num_of_nat_gateway : 0 + allocation_id = element(local.nat_gateway_ips,(var.single_nat_gateway ? 0 : count.index)) + subnet_id = element(aws_subnet.public_subnet.*.id,(var.single_nat_gateway ? 0 : count.index) ) + tags = merge( + { + "Name" = format("%s-%s",var.stack_name,element(var.availability_zones,var.single_nat_gateway ? 0 : count.index )), + }, + var.tags, + var.custom_nat_gateway_tags, + var.custom_nat_gateway_tags, + ) + depends_on = [aws_internet_gateway.igw] +} + +resource "aws_route" "private_route_nat_gateway" { + count = var.create_vpc && var.enable_nat_gateway ? local.num_of_nat_gateway : 0 + + route_table_id = element(aws_route_table.private_route_table.*.id,count.index ) + destination_cidr_block = local.all_ips + nat_gateway_id = element(aws_nat_gateway.aws_nat.*.id,count.index ) + timeouts { + create = "5m" + } +} +resource "aws_route" "database_route_nat_gateway" { + count = var.create_vpc && var.create_db_subnet_group && length(var.db_subnets) > 0 ? 
1 : 0 + route_table_id = aws_route_table.database_route_table[0].id + destination_cidr_block = local.all_ips + nat_gateway_id = element(aws_nat_gateway.aws_nat.*.id,count.index ) + + timeouts { + create = "5m" + } + +} +locals { + all_ips = "0.0.0.0/0" + max_subnet_length = max(length(var.private_subnets),length(var.db_subnets)) + num_of_nat_gateway = var.single_nat_gateway ? 1 : var.one_nat_gateway_per_az ? length(var.availability_zones) : local.max_subnet_length + nat_gateway_ips = split(",", (var.reuse_nat_ips ? join(",", var.external_nat_ip_ids) : join(",", aws_eip.nat.*.id))) +} +#route table association +resource "aws_route_table_association" "private-subnet" { + count = var.create_vpc && length(var.private_subnets) > 0 ? length(var.private_subnets) : 0 + route_table_id = element(aws_route_table.private_route_table.*.id,var.single_nat_gateway ? 0 : count.index ) + subnet_id = element(aws_subnet.private_subnet.*.id,count.index) +} +resource "aws_route_table_association" "public-subnet" { + count = var.create_vpc && length(var.public_subnets) > 0 ? length(var.public_subnets) : 0 + route_table_id = aws_route_table.public_route_table[0].id + subnet_id = element(aws_subnet.public_subnet.*.id,count.index) +} + +resource "aws_route_table_association" "db-subnet" { + count = var.create_vpc && length(var.db_subnets) > 0 ? length(var.db_subnets) : 0 + route_table_id = element(coalescelist(aws_route_table.database_route_table.*.id,aws_route_table.private_route_table.*.id),var.single_nat_gateway || var.create_db_subnet_group ? 
0 : count.index) + subnet_id = element(aws_subnet.db_subnet.*.id,count.index) +} \ No newline at end of file diff --git a/terraform/modules/networks/vpc/outputs.tf b/terraform/modules/networks/vpc/outputs.tf new file mode 100644 index 000000000..2088bfb36 --- /dev/null +++ b/terraform/modules/networks/vpc/outputs.tf @@ -0,0 +1,24 @@ +output "vpc_id" { + description = "VPC Id" + value = aws_vpc.vpc.id +} + +output "private_subnets_ids" { + description = "private subnets ids" + value = aws_subnet.private_subnet.*.id +} + +output "public_subnets_ids" { + description = "public subnets ids" + value = aws_subnet.public_subnet.*.id +} + +output "database_subnets_ids" { + description = "database subnets ids" + value = aws_subnet.db_subnet.*.id +} + +output "vpc_cidr_block"{ + description="cidr block details of vpc" + value = aws_vpc.vpc.cidr_block +} \ No newline at end of file diff --git a/terraform/modules/networks/vpc/variables.tf b/terraform/modules/networks/vpc/variables.tf new file mode 100644 index 000000000..fbbed4fd4 --- /dev/null +++ b/terraform/modules/networks/vpc/variables.tf @@ -0,0 +1,159 @@ +variable "tags" { + description = "tags for the vpc" + type = map(string) +} +variable "vpc_cidr_block" { + description = "CIDR Block for this VPC. Example 10.0.0.0/16" + default = "10.0.0.0/16" + type = string +} +variable "stack_name" { + description = "Name of project. 
Example arp" + type = string + default = "main" +} +variable "custom_vpc_tags" { + description = "Custom tags for the vpc" + type = map(string) + default = {} +} +variable "custom_igw_tags" { + description = "Custom tags for the vpc" + type = map(string) + default = {} +} +variable "custom_private_tags" { + description = "Custom tags for the private subnet" + type = map(string) + default = {} +} +variable "custom_public_tags" { + description = "Custom tags for the public subnet" + type = map(string) + default = {} +} +variable "custom_db_tags" { + description = "Custom tags for the database subnet" + type = map(string) + default = {} +} +variable "custom_nat_gateway_tags" { + description = "Custom tags for the database subnet" + type = map(string) + default = {} +} +variable "custom_db_subnet_group_tags" { + description = "Custom tags for the database subnet group" + type = map(string) + default = {} +} +variable "enable_hostname_dns" { + description = "use true or false to determine support for hostname dns" + type = bool + default = true +} +variable "instance_tenancy" { + description = "instances tenancy option. Options are dedicated or default" + default = "default" + type = string +} + +variable "public_subnets" { + description = "Provide list of public subnets to use in this VPC. Example 10.0.1.0/24,10.0.2.0/24" + default = [] + type = list(string) +} + +variable "private_subnets" { + description = "Provide list private subnets to use in this VPC. Example 10.0.10.0/24,10.0.11.0/24" + default = [] + type = list(string) +} + +variable "db_subnets" { + description = "Provide list database subnets to use in this VPC. 
Example 10.0.20.0/24,10.0.21.0/24" + type = list(string) + default = [] +} +variable "create_vpc" { + description = "Use true or false to determine if a new vpc is to be created" + type = bool + default = true +} +variable "env" { + description = "specify environment for this vpc" + type = string + default = "" +} +variable "single_nat_gateway" { + description = "Choose as to wherether you want single Nat Gateway for the environments or multiple" + type = bool + default = true +} +variable "one_nat_gateway_per_az" { + description = "Choose as to wherether you want one Nat Gateway per availability zone or not" + type = bool + default = false +} +variable "availability_zones" { + description = "list of availability zones to use" + type = list(string) + default = [] +} +variable "create_db_subnet_group" { + description = "Set to true if you want to create database subnet group for RDS" + type = bool + default = true +} +variable "name_db_subnet_group" { + default = "db-subnet" + type = string + description = "name of the db subnet group" +} + +variable "reuse_nat_ips" { + description = "Choose wherether you want EIPs to be created or not" + type = bool + default = false +} + +variable "external_nat_ip_ids" { + description = "List of EIP to be assigned to the NAT Gateways if you don't want to don't want to reuse existing EIP" + type = list(string) + default = [] +} +variable "enable_nat_gateway" { + description = "choose as to provision NAT Gateways for each of your private subnets" + type = bool + default = true +} +variable "custom_public_route_table_tags" { + description = "Custom tags for the vpc" + type = map(string) + default = {} +} +variable "custom_private_route_table_tags" { + description = "Custom tags for the vpc" + type = map(string) + default = {} +} +variable "custom_private_subnet_tags" { + description = "Custom tags for the vpc" + type = map(string) + default = {} +} +variable "custom_public_subnet_tags" { + description = "Custom tags for the vpc" + 
type = map(string) + default = {} +} +variable "custom_db_subnet_tags" { + description = "Custom tags for the vpc" + type = map(string) + default = {} +} +variable "enable_dns_support" { + description = "enable dns resolution" + type = bool + default = true +} diff --git a/terraform/modules/new-relic/apm/main.tf b/terraform/modules/new-relic/apm/main.tf new file mode 100644 index 000000000..b4093326d --- /dev/null +++ b/terraform/modules/new-relic/apm/main.tf @@ -0,0 +1,113 @@ + +locals { + policy_name = var.policy_name == null ? "${var.application_name}: APM Monitoring" : var.policy_name +} + +resource "newrelic_alert_policy" "policy" { + name = local.policy_name + incident_preference = var.incident_preference + channel_ids = var.channel_ids +} + +resource "newrelic_nrql_alert_condition" "apdex_condition" { + policy_id = newrelic_alert_policy.policy.id + + name = "Apdex (Low)" + type = "static" + runbook_url = var.runbook_url + enabled = true + + warning { + operator = "below" + threshold = var.apdex_warning_threshold + threshold_duration = var.apdex_duration + threshold_occurrences = "ALL" + } + + critical { + operator = "below" + threshold = var.apdex_critical_threshold + threshold_duration = var.apdex_duration + threshold_occurrences = "ALL" + } + + nrql { + query = "SELECT apdex(duration, t: ${var.apdex_t}) FROM Transaction WHERE appName = '${var.application_name}' AND accountId = ${var.account_id}" + evaluation_offset = 3 + } + + value_function = "single_value" + violation_time_limit = "ONE_HOUR" +} + +resource "newrelic_nrql_alert_condition" "error_rate_condition" { + policy_id = newrelic_alert_policy.policy.id + + name = "Error rate (High)" + type = "static" + runbook_url = var.runbook_url + enabled = true + + critical { + operator = "above" + threshold = var.error_rate_critical_threshold + threshold_duration = var.error_rate_duration + threshold_occurrences = "ALL" + } + + warning { + operator = "above" + threshold = var.error_rate_warning_threshold + 
threshold_duration = var.error_rate_duration + threshold_occurrences = "ALL" + } + + nrql { + query = "SELECT percentage(count(*), WHERE error IS TRUE) FROM Transaction WHERE appName = '${var.application_name}' AND accountId = ${var.account_id}" + evaluation_offset = 3 + } + + value_function = "single_value" + violation_time_limit = "ONE_HOUR" +} + +resource "newrelic_synthetics_monitor" "synthetics_monitor" { + count = var.application_url == null ? 0 : 1 + + name = "${var.application_name}: SIMPLE" + type = "SIMPLE" + frequency = var.synthetics_monitor_frequency + status = "ENABLED" + locations = var.synthetics_monitor_locations + + uri = var.application_url + validation_string = var.synthetics_monitor_validation_string + verify_ssl = var.synthetics_monitor_verify_ssl +} + + +resource "newrelic_nrql_alert_condition" "synthetics_condition" { + count = var.application_url == null ? 0 : 1 + + policy_id = newrelic_alert_policy.policy.id + + name = "Synthetics monitor failure" + type = "static" + runbook_url = var.runbook_url + enabled = true + + critical { + operator = "above" + threshold = var.synthetics_condition_threshold + threshold_duration = var.synthetics_condition_duration + threshold_occurrences = "ALL" + } + + nrql { + query = "SELECT count(*) FROM SyntheticCheck WHERE result != 'SUCCESS' WHERE monitorId = '${newrelic_synthetics_monitor.synthetics_monitor[0].id}'" + evaluation_offset = 3 + } + + value_function = "single_value" + violation_time_limit = "ONE_HOUR" +} \ No newline at end of file diff --git a/terraform/modules/new-relic/apm/outputs.tf b/terraform/modules/new-relic/apm/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/terraform/modules/new-relic/apm/variables.tf b/terraform/modules/new-relic/apm/variables.tf new file mode 100644 index 000000000..d9a8b9e5d --- /dev/null +++ b/terraform/modules/new-relic/apm/variables.tf @@ -0,0 +1,118 @@ + +variable "application_name" { + description = "The name of the New Relic application 
to monitor" + type = string +} + +variable "account_id" { + description = "The account ID the application reports to" + type = number +} + +variable "policy_name" { + description = "The name of the alert policy to manage" + type = string + default = null +} + +variable "runbook_url" { + description = "A URL that points to a runbook for when this application is failing" + type = string + default = null +} + +variable "incident_preference" { + description = "The rollup strategy of the alert policy. Valid values are PER_POLICY, PER_CONDITION, and PER_CONDITION_AND_TARGET" + type = string + default = "PER_POLICY" +} + +variable "channel_ids" { + description = "The notification channel IDs to link to this alert policy" + type = list(number) + default = null +} + +variable "apdex_warning_threshold" { + description = "The threshold below which a warning violation will be triggered for the Apdex condition (percentage satisfied users)" + type = number + default = 0.8 +} + +variable "apdex_critical_threshold" { + description = "The threshold below which a critical violation will be triggered for the Apdex condition (percentage satisfied users)" + type = number + default = 0.7 +} + +variable "apdex_duration" { + description = "The evaluation window length of the Apdex condition (seconds). Value must be a multiple of 60 and within 120-3600 seconds for baseline conditions and 120-7200 seconds for static conditions." 
+ type = number + default = 300 +} + +variable "apdex_t" { + description = "The response time above which a transaction is considered tolerable" + type = number + default = 0.4 +} + +variable "error_rate_warning_threshold" { + description = "The threshold above which a warning violation will be triggered for the error rate condition (errors/minute)" + type = number + default = 2 +} + +variable "error_rate_critical_threshold" { + description = "The threshold above which a critical violation will be triggered for the error rate condition (errors/minute)" + type = number + default = 5 +} + +variable "error_rate_duration" { + description = "The evaluation window length of the error rate condition (seconds). Value must be a multiple of 60 and within 120-3600 seconds for baseline conditions and 120-7200 seconds for static conditions." + type = number + default = 300 +} + +variable "application_url" { + description = "The URL to use when configuring a Synthetics monitor for this application" + type = string + default = null +} + +variable "synthetics_monitor_frequency" { + description = "The interval on which to run Synthetics checks against the provided application URL" + type = number + default = 5 +} + +variable "synthetics_monitor_locations" { + description = "The locations to run Synthetics checks from" + type = list(string) + default = ["AWS_US_EAST_1"] +} + +variable "synthetics_monitor_validation_string" { + description = "An optional string to check existence of when running Synthetics checks" + type = string + default = null +} + +variable "synthetics_monitor_verify_ssl" { + description = "If true, verifies SSL when running Synthetics checks" + type = bool + default = false +} + +variable "synthetics_condition_threshold" { + description = "The threshold above which a critical violation will be triggered for the Synthetics condition (failure count)" + type = number + default = 0 +} + +variable "synthetics_condition_duration" { + description = "The evaluation 
window length of the Synthetics condition (seconds). Value must be a multiple of 60 and within 120-3600 seconds for baseline conditions and 120-7200 seconds for static conditions." + type = number + default = 300 +} diff --git a/terraform/modules/new-relic/infrastructure/main.tf b/terraform/modules/new-relic/infrastructure/main.tf new file mode 100644 index 000000000..0f2e92506 --- /dev/null +++ b/terraform/modules/new-relic/infrastructure/main.tf @@ -0,0 +1,101 @@ + +resource "newrelic_alert_policy" "alert_policy" { + name = var.alert_policy_name #"ppdc_disk_utilization" + incident_preference = var.incident_preference #"PER_CONDITION" #var.incident_preference +} + +# Creates an email alert channel. +resource "newrelic_alert_channel" "email_channel" { + name = var.email_channel #"ppdc_email_channel" + type = "email" + + config { + recipients = var.recipients #"foo@example.com" # need to check for the alias + include_json_attachment = "1" + } +} + +# Creates a Slack alert channel. +resource "newrelic_alert_channel" "slack_channel" { + name = var.slack_channel #"ppdc-monitoring" + type = "slack" + + config { + channel = var.slack_channel_name #"ppdc_monitoring" + url = var.slack_url #"https://join.slack.com/share/zt-tcihyl3b-N7orX2KeHwZZU_5fW1kN1g" + } +} + + + +# Applies the created channels above to the alert policy +# referenced at the top of the config. 
+resource "newrelic_alert_policy_channel" "newrelic_alert_notification_channel" { + policy_id = newrelic_alert_policy.alert_policy.id + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_infra_alert_condition" "high_disk_usage" { + policy_id = newrelic_alert_policy.alert_policy.id + + name = "High disk usage" + description = "Warning if disk usage goes above 80% and critical alert if goes above 90%" + type = "infra_metric" + event = "StorageSample" + select = "diskUsedPercent" + comparison = "above" + where = var.host_condition #"(hostname LIKE '%ppdc*frontend%')" + + critical { + duration = 25 + value = 90 + time_function = "all" + } + + warning { + duration = 10 + value = 80 + time_function = "all" + } +} + +resource "newrelic_infra_alert_condition" "cpu_percent_utilization" { + policy_id = newrelic_alert_policy.alert_policy.id + + name = "High disk usage" + description = "Warning if disk usage goes above 80% and critical alert if goes above 90%" + type = "infra_metric" + event = "StorageSample" + select = "cpuPercent" + comparison = "below" + where = var.host_condition #"(hostname LIKE '%ppdc*frontend%')" + + critical { + duration = 2 + value = 10 + time_function = "all" + } + + warning { + duration = 2 + value = 20 + time_function = "all" + } +} + +resource "newrelic_infra_alert_condition" "host_not_reporting" { + policy_id = newrelic_alert_policy.alert_policy.id + + name = "Host not reporting" + description = "Critical alert when the host is not reporting" + type = "infra_host_not_reporting" + where = var.host_condition #"(hostname LIKE '%frontend%')" + + critical { + duration = 5 + } +} + diff --git a/terraform/modules/new-relic/infrastructure/outputs.tf b/terraform/modules/new-relic/infrastructure/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/terraform/modules/new-relic/infrastructure/variables.tf b/terraform/modules/new-relic/infrastructure/variables.tf 
new file mode 100644 index 000000000..110cd39cc --- /dev/null +++ b/terraform/modules/new-relic/infrastructure/variables.tf @@ -0,0 +1,31 @@ +variable "alert_policy_name" { + default = "" +} +variable "incident_preference" { + default = "" + type = string +} +variable "email_channel" { + type = string + default = null +} +variable "recipients" { + type = string + default = null +} +variable "slack_channel" { + type = string + default = null +} +variable "slack_channel_name" { + type = string + default = null +} +variable "slack_url" { + type = string + default = null +} +variable "host_condition" { + type = string + default = null +} diff --git a/terraform/modules/opensearch/data.tf b/terraform/modules/opensearch/data.tf new file mode 100644 index 000000000..c3dd4ca02 --- /dev/null +++ b/terraform/modules/opensearch/data.tf @@ -0,0 +1,5 @@ +data "aws_region" "region" {} + +data "aws_caller_identity" "caller" {} + + diff --git a/terraform/modules/opensearch/main.tf b/terraform/modules/opensearch/main.tf new file mode 100644 index 000000000..23aae11c6 --- /dev/null +++ b/terraform/modules/opensearch/main.tf @@ -0,0 +1,77 @@ +locals { + http_port = 80 + any_port = 0 + any_protocol = "-1" + tcp_protocol = "tcp" + https_port = "443" + all_ips = ["0.0.0.0/0"] + domain_name = "${var.stack_name}-${var.env}-elasticsearch" +} + +resource "aws_security_group" "es" { + name = "${var.stack_name}-${var.env}-elasticsearch-sg" + vpc_id = var.vpc_id + + ingress { + from_port = local.https_port + to_port = local.https_port + protocol = local.tcp_protocol + cidr_blocks = var.allowed_subnet_ip_block + } +} + +resource "aws_security_group_rule" "all_outbound" { + from_port = local.any_port + protocol = local.any_protocol + to_port = local.any_port + cidr_blocks = local.all_ips + + security_group_id = aws_security_group.es.id + type = "egress" +} + +resource "aws_iam_service_linked_role" "es" { + count = var.create_es_service_role ? 
1: 0 + aws_service_name = "es.amazonaws.com" +} + +resource "aws_elasticsearch_domain" "es" { + domain_name = local.domain_name + elasticsearch_version = var.elasticsearch_version + vpc_options { + subnet_ids = [element(var.private_subnet_ids,0)] + security_group_ids = [aws_security_group.es.id] + } + + ebs_options { + ebs_enabled = true + volume_size = 120 + } + + access_policies = < 0 + db_subnet_group_name_provided = var.db_subnet_group_name != null && var.db_subnet_group_name != "" + + db_subnet_group_name = local.db_subnet_group_name_provided ? var.db_subnet_group_name : ( + local.subnet_ids_provided ? join("", aws_db_subnet_group.default.*.name) : null + ) + + availability_zone = var.multi_az ? null : var.availability_zone +} + +resource "aws_db_instance" "default" { + + identifier = var.identifier + name = var.database_name + username = var.database_user + password = var.database_password + port = var.database_port + engine = var.engine + engine_version = var.engine_version + character_set_name = var.charset_name + instance_class = var.instance_class + allocated_storage = var.allocated_storage + max_allocated_storage = var.max_allocated_storage + storage_encrypted = var.storage_encrypted + kms_key_id = var.kms_key_arn + + vpc_security_group_ids = compact( + concat( + [join("", aws_security_group.default.*.id)], + var.associate_security_group_ids + ) + ) + + db_subnet_group_name = local.db_subnet_group_name + availability_zone = local.availability_zone + + ca_cert_identifier = var.ca_cert_identifier + parameter_group_name = length(var.parameter_group_name) > 0 ? var.parameter_group_name : join("", aws_db_parameter_group.default.*.name) + option_group_name = length(var.option_group_name) > 0 ? 
var.option_group_name : join("", aws_db_option_group.default.*.name) + license_model = var.license_model + multi_az = var.multi_az + storage_type = var.storage_type + iops = var.iops + publicly_accessible = var.publicly_accessible + snapshot_identifier = var.snapshot_identifier + allow_major_version_upgrade = var.allow_major_version_upgrade + auto_minor_version_upgrade = var.auto_minor_version_upgrade + apply_immediately = var.apply_immediately + maintenance_window = var.maintenance_window + skip_final_snapshot = var.skip_final_snapshot + copy_tags_to_snapshot = var.copy_tags_to_snapshot + backup_retention_period = var.backup_retention_period + backup_window = var.backup_window + tags = var.tags + deletion_protection = var.deletion_protection + + iam_database_authentication_enabled = var.iam_database_authentication_enabled + enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports + performance_insights_enabled = var.performance_insights_enabled + performance_insights_kms_key_id = var.performance_insights_enabled ? var.performance_insights_kms_key_id : null + performance_insights_retention_period = var.performance_insights_enabled ? 
var.performance_insights_retention_period : null + + monitoring_interval = var.monitoring_interval + monitoring_role_arn = var.monitoring_role_arn + + depends_on = [ + aws_db_subnet_group.default, + aws_security_group.default, + aws_db_parameter_group.default, + aws_db_option_group.default + ] + + lifecycle { + ignore_changes = [ + snapshot_identifier, # if created from a snapshot, will be non-null at creation, but null afterwards + ] + } +} + +resource "aws_db_parameter_group" "default" { + + + family = var.db_parameter_group + tags = var.tags + + dynamic "parameter" { + for_each = var.db_parameter + content { + apply_method = lookup(parameter.value, "apply_method", null) + name = parameter.value.name + value = parameter.value.value + } + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_db_option_group" "default" { + + engine_name = var.engine + major_engine_version = local.major_engine_version + tags = var.tags + + dynamic "option" { + for_each = var.db_options + content { + db_security_group_memberships = lookup(option.value, "db_security_group_memberships", null) + option_name = option.value.option_name + port = lookup(option.value, "port", null) + version = lookup(option.value, "version", null) + vpc_security_group_memberships = lookup(option.value, "vpc_security_group_memberships", null) + + dynamic "option_settings" { + for_each = lookup(option.value, "option_settings", []) + content { + name = option_settings.value.name + value = option_settings.value.value + } + } + } + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_db_subnet_group" "default" { + + name = var.db_subnet_id_name + subnet_ids = var.subnet_ids + tags = var.tags +} + +resource "aws_security_group" "default" { + + name = var.subnet_id_name + description = "Allow inbound traffic from the security groups" + vpc_id = var.vpc_id + tags = var.tags +} + +resource "aws_security_group_rule" "ingress_security_groups" { + count = 
length(var.security_group_ids) + description = "Allow inbound traffic from existing Security Groups" + type = "ingress" + from_port = var.database_port + to_port = var.database_port + protocol = "tcp" + source_security_group_id = var.security_group_ids[count.index] + security_group_id = join("", aws_security_group.default.*.id) +} + +resource "aws_security_group_rule" "ingress_cidr_blocks" { + + description = "Allow inbound traffic from CIDR blocks" + type = "ingress" + from_port = var.database_port + to_port = var.database_port + protocol = "tcp" + cidr_blocks = var.allowed_cidr_blocks + security_group_id = join("", aws_security_group.default.*.id) +} + +resource "aws_security_group_rule" "egress" { + description = "Allow all egress traffic" + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = join("", aws_security_group.default.*.id) +} + diff --git a/terraform/modules/rds/outputs.tf b/terraform/modules/rds/outputs.tf new file mode 100644 index 000000000..e4e9049d1 --- /dev/null +++ b/terraform/modules/rds/outputs.tf @@ -0,0 +1,45 @@ +output "instance_id" { + value = join("", aws_db_instance.default.*.id) + description = "ID of the instance" +} + +output "instance_arn" { + value = join("", aws_db_instance.default.*.arn) + description = "ARN of the instance" +} + +output "instance_address" { + value = join("", aws_db_instance.default.*.address) + description = "Address of the instance" +} + +output "instance_endpoint" { + value = join("", aws_db_instance.default.*.endpoint) + description = "DNS Endpoint of the instance" +} + +output "subnet_group_id" { + value = join("", aws_db_subnet_group.default.*.id) + description = "ID of the created Subnet Group" +} + +output "security_group_id" { + value = join("", aws_security_group.default.*.id) + description = "ID of the Security Group" +} + +output "parameter_group_id" { + value = join("", aws_db_parameter_group.default.*.id) + description = "ID of the 
Parameter Group" +} + +output "option_group_id" { + value = join("", aws_db_option_group.default.*.id) + description = "ID of the Option Group" +} + + +output "resource_id" { + value = join("", aws_db_instance.default.*.resource_id) + description = "The RDS Resource ID of this instance." +} \ No newline at end of file diff --git a/terraform/modules/rds/variable.tf b/terraform/modules/rds/variable.tf new file mode 100644 index 000000000..aa3b21011 --- /dev/null +++ b/terraform/modules/rds/variable.tf @@ -0,0 +1,346 @@ +variable "dns_zone_id" { + type = string + default = "" + description = "The ID of the DNS Zone in Route53 where a new DNS record will be created for the DB host name" +} + +variable "host_name" { + type = string + default = "db" + description = "The DB host name created in Route53" +} + +variable "security_group_ids" { + type = list(string) + default = [] + description = "The IDs of the security groups from which to allow `ingress` traffic to the DB instance" +} + +variable "allowed_cidr_blocks" { + type = list(string) + default = [] + description = "The whitelisted CIDRs which to allow `ingress` traffic to the DB instance" +} + +variable "associate_security_group_ids" { + type = list(string) + default = [] + description = "The IDs of the existing security groups to associate with the DB instance" +} + +variable "database_name" { + type = string + description = "The name of the database to create when the DB instance is created" +} + +variable "database_user" { + type = string + default = "" + description = "(Required unless a `snapshot_identifier` or `replicate_source_db` is provided) Username for the master DB user" +} + +variable "database_password" { + type = string + default = "" + description = "(Required unless a snapshot_identifier or replicate_source_db is provided) Password for the master DB user" +} + +variable "database_port" { + type = number + description = "Database port (_e.g._ `3306` for `MySQL`). 
Used in the DB Security Group to allow access to the DB instance from the provided `security_group_ids`" +} + +variable "deletion_protection" { + type = bool + description = "Set to true to enable deletion protection on the RDS instance" + default = false +} + +variable "multi_az" { + type = bool + description = "Set to true if multi AZ deployment must be supported" + default = false +} + +variable "storage_type" { + type = string + description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)" + default = "standard" +} + +variable "storage_encrypted" { + type = bool + description = "(Optional) Specifies whether the DB instance is encrypted. The default is false if not specified" + default = true +} + +variable "iops" { + type = number + description = "The amount of provisioned IOPS. Setting this implies a storage_type of 'io1'. Default is 0 if rds storage type is not 'io1'" + default = 0 +} + +variable "allocated_storage" { + type = number + description = "The allocated storage in GBs" + default = null +} + +variable "max_allocated_storage" { + type = number + description = "The upper limit to which RDS can automatically scale the storage in GBs" + default = 0 +} + +variable "engine" { + type = string + description = "Database engine type" + default = null + # http://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html + # - mysql + # - postgres + # - oracle-* + # - sqlserver-* +} + +variable "engine_version" { + type = string + description = "Database engine version, depends on engine type" + # http://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html +} + +variable "major_engine_version" { + type = string + description = "Database MAJOR engine version, depends on engine type" + default = "" + # https://docs.aws.amazon.com/cli/latest/reference/rds/create-option-group.html +} + +variable "charset_name" { + type = string + description = "The character set name to use for DB encoding. 
[Oracle & Microsoft SQL only](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance#character_set_name). For other engines use `db_parameter`" + default = null +} + +variable "license_model" { + type = string + description = "License model for this DB. Optional, but required for some DB Engines. Valid values: license-included | bring-your-own-license | general-public-license" + default = "" +} + +variable "instance_class" { + type = string + description = "Class of RDS instance" + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html +} + +# This is for custom parameters to be passed to the DB +# We're "cloning" default ones, but we need to specify which should be copied +variable "db_parameter_group" { + type = string + description = "The DB parameter group family name. The value depends on DB engine used. See [DBParameterGroupFamily](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html#API_CreateDBParameterGroup_RequestParameters) for instructions on how to retrieve applicable value." + # "mysql5.6" + # "postgres9.5" +} + +variable "publicly_accessible" { + type = bool + description = "Determines if database can be publicly available (NOT recommended)" + default = false +} + +variable "subnet_ids" { + description = "List of subnet IDs for the DB. DB instance will be created in the VPC associated with the DB subnet group provisioned using the subnet IDs. Specify one of `subnet_ids`, `db_subnet_group_name` or `availability_zone`" + type = list(string) + default = [] +} + +variable "availability_zone" { + type = string + default = null + description = "The AZ for the RDS instance. Specify one of `subnet_ids`, `db_subnet_group_name` or `availability_zone`. 
If `availability_zone` is provided, the instance will be placed into the default VPC or EC2 Classic" +} + +variable "db_subnet_group_name" { + type = string + default = null + description = "Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group. Specify one of `subnet_ids`, `db_subnet_group_name` or `availability_zone`" +} + +variable "vpc_id" { + type = string + description = "VPC ID the DB instance will be created in" +} + +variable "auto_minor_version_upgrade" { + type = bool + description = "Allow automated minor version upgrade (e.g. from Postgres 9.5.3 to Postgres 9.5.4)" + default = true +} + +variable "allow_major_version_upgrade" { + type = bool + description = "Allow major version upgrade" + default = false +} + +variable "apply_immediately" { + type = bool + description = "Specifies whether any database modifications are applied immediately, or during the next maintenance window" + default = false +} + +variable "maintenance_window" { + type = string + description = "The window to perform maintenance in. Syntax: 'ddd:hh24:mi-ddd:hh24:mi' UTC " + default = "Mon:03:00-Mon:04:00" +} + +variable "skip_final_snapshot" { + type = bool + description = "If true (default), no snapshot will be made before deleting DB" + default = true +} + +variable "copy_tags_to_snapshot" { + type = bool + description = "Copy tags from DB to a snapshot" + default = true +} + +variable "backup_retention_period" { + type = number + description = "Backup retention period in days. Must be > 0 to enable backups" + default = 0 +} + +variable "backup_window" { + type = string + description = "When AWS can perform DB snapshots, can't overlap with maintenance window" + default = "22:00-03:00" +} + +variable "db_parameter" { + type = list(object({ + apply_method = string + name = string + value = string + })) + default = [] + description = "A list of DB parameters to apply. 
Note that parameters may differ from a DB family to another" +} + +variable "db_options" { + type = list(object({ + db_security_group_memberships = list(string) + option_name = string + port = number + version = string + vpc_security_group_memberships = list(string) + + option_settings = list(object({ + name = string + value = string + })) + })) + + default = [] + description = "A list of DB options to apply with an option group. Depends on DB engine" +} + +variable "snapshot_identifier" { + type = string + description = "Snapshot identifier e.g: rds:production-2019-06-26-06-05. If specified, the module create cluster from the snapshot" + default = null +} + +variable "final_snapshot_identifier" { + type = string + description = "Final snapshot identifier e.g.: some-db-final-snapshot-2019-06-26-06-05" + default = "" +} + +variable "parameter_group_name" { + type = string + description = "Name of the DB parameter group to associate" + default = "" +} + +variable "option_group_name" { + type = string + description = "Name of the DB option group to associate" + default = "" +} + +variable "kms_key_arn" { + type = string + description = "The ARN of the existing KMS key to encrypt storage" + default = "" +} + +variable "performance_insights_enabled" { + type = bool + default = false + description = "Specifies whether Performance Insights are enabled." +} + +variable "performance_insights_kms_key_id" { + type = string + default = null + description = "The ARN for the KMS key to encrypt Performance Insights data. Once KMS key is set, it can never be changed." +} + +variable "performance_insights_retention_period" { + type = number + default = 7 + description = "The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years)." +} + +variable "enabled_cloudwatch_logs_exports" { + type = list(string) + default = [] + description = "List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. 
Valid values (depending on engine): alert, audit, error, general, listener, slowquery, trace, postgresql (PostgreSQL), upgrade (PostgreSQL)." +} + +variable "ca_cert_identifier" { + type = string + description = "The identifier of the CA certificate for the DB instance" + default = null +} + +variable "monitoring_interval" { + description = "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. Valid Values are 0, 1, 5, 10, 15, 30, 60." + default = "0" +} + +variable "monitoring_role_arn" { + type = string + description = "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs" + default = null +} + +variable "iam_database_authentication_enabled" { + description = "Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled" + default = false +} + +variable "replicate_source_db" { + type = string + description = "Specifies that this resource is a Replicate database, and to use this value as the source database. This correlates to the `identifier` of another Amazon RDS Database to replicate (if replicating within a single region) or ARN of the Amazon RDS Database to replicate (if replicating cross-region). Note that if you are creating a cross-region replica of an encrypted database you will also need to specify a `kms_key_id`. See [DB Instance Replication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html) and [Working with PostgreSQL and MySQL Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html) for more information on using Replication." 
+ default = null +} + +variable "identifier" { + default = "" +} +variable "tags" { + description = "tags to associate with this instance" + type = map(string) +} +variable "db_subnet_id_name" { + description = "db_subnet_id_name" + type = string +} +variable "subnet_id_name" { + default = "" +} \ No newline at end of file diff --git a/terraform/modules/static-site/api-gateway/main.tf b/terraform/modules/static-site/api-gateway/main.tf new file mode 100644 index 000000000..f6b605bbc --- /dev/null +++ b/terraform/modules/static-site/api-gateway/main.tf @@ -0,0 +1,201 @@ +resource "aws_api_gateway_rest_api" "api" { + name = var.api_gateway_name + description = "api gateway for bento frame work" +} + +resource "aws_api_gateway_resource" "bento_resource" { + rest_api_id = aws_api_gateway_rest_api.api.id + parent_id = aws_api_gateway_rest_api.api.root_resource_id + path_part = "icdc" +} + +resource "aws_api_gateway_resource" "api_resource" { + rest_api_id = aws_api_gateway_rest_api.api.id + parent_id = aws_api_gateway_rest_api.api.root_resource_id + path_part = "api" +} +resource "aws_api_gateway_method" "bento_method" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.bento_resource.id + http_method = "ANY" + authorization = "NONE" + + request_parameters = { + "method.request.path.proxy" = true + } + +} +resource "aws_api_gateway_method" "api_method" { + authorization = "NONE" + http_method = "ANY" + resource_id = aws_api_gateway_resource.api_resource.id + rest_api_id = aws_api_gateway_rest_api.api.id + + request_parameters = { + "method.request.path.proxy" = true + } + +} + +resource "aws_api_gateway_integration" "bento_integration" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.bento_resource.id + http_method = aws_api_gateway_method.bento_method.http_method + type = "HTTP_PROXY" + integration_http_method = "ANY" + + uri = 
"http://bento.essential-dev.com.s3-website-us-east-1.amazonaws.com/" + + passthrough_behavior = "WHEN_NO_MATCH" + request_parameters = { + "integration.request.path.proxy" = "method.request.path.proxy" + } + +} + + +resource "aws_api_gateway_method_response" "status_ok_bento" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.bento_resource.id + http_method = aws_api_gateway_method.bento_method.http_method + status_code = "200" + +} + +resource "aws_api_gateway_method_response" "status_ok_resource" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = aws_api_gateway_method.api_method.http_method + status_code = "200" + +} + +resource "aws_api_gateway_integration" "proxy_integration" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = aws_api_gateway_method.api_method.http_method + type = "HTTP_PROXY" + uri = "http://${join("",var.alb_dns_name)}/api" + //uri = "http://bento.essential-dev.com.s3-website-us-east-1.amazonaws.com/{proxy}" + integration_http_method = "ANY" + passthrough_behavior = "WHEN_NO_MATCH" + + cache_key_parameters = ["method.request.path.proxy"] + request_parameters = { + "integration.request.path.proxy" = "method.request.path.proxy" + } + +} + +resource "aws_api_gateway_integration_response" "status_ok_integration_bento" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.bento_resource.id + http_method = aws_api_gateway_method.bento_method.http_method + status_code = aws_api_gateway_method_response.status_ok_bento.status_code + +// response_templates = { +// "application/json" = "" +// } +} + +resource "aws_api_gateway_integration_response" "status_ok_integration_resource" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = 
aws_api_gateway_method.api_method.http_method + status_code = aws_api_gateway_method_response.status_ok_resource.status_code + +} + +resource "aws_api_gateway_domain_name" "domain" { + domain_name = var.domain_name + certificate_arn = var.certificate_arn +} + +resource "aws_api_gateway_base_path_mapping" "base_path_map" { + api_id = aws_api_gateway_rest_api.api.id + domain_name = aws_api_gateway_domain_name.domain.domain_name + stage_name = aws_api_gateway_deployment.deployment.stage_name +} + +resource "aws_api_gateway_deployment" "deployment" { + depends_on = [ + aws_api_gateway_integration.bento_integration, + aws_api_gateway_integration.proxy_integration + ] + rest_api_id = aws_api_gateway_rest_api.api.id + stage_name = var.api_stage_name +} + +//#create s3 bucket to host website logs +//resource "aws_s3_bucket" "s3-icdc" { +// bucket = join(".",["api",var.domain_name]) +// tags = merge( +// { +// "Name" = format("%s",var.stack_name) +// }, +// var.tags, +// ) +//} +// +//#create policy document +//data "aws_iam_policy_document" "api_read_policy" { +// statement { +// sid = "apigwRead" +// actions = [ +// "s3:Get*", +// "s3:List*" +// ] +// resources = [ join("",[aws_s3_bucket.s3-icdc.arn,"/*"])] +// principals { +// type = "Service" +// identifiers = ["apigateway.amazonaws.com"] +// } +// effect = "Allow" +// +// } +// +//} +// +//resource "aws_s3_bucket_policy" "s3_api_read" { +// bucket = aws_s3_bucket.s3-icdc.id +// policy = data.aws_iam_policy_document.api_read_policy.json +//} +// +//data "aws_iam_policy_document" "s3_full_access_doc" { +// statement { +// sid = "apigwRead0" +// actions = [ +// "s3:*" +// ] +// resources = [ "*"] +// effect = "Allow" +// +// } +//} +// +//data "aws_iam_policy_document" "api_gateway_policy" { +// statement { +// actions = ["sts:AssumeRole"] +// effect = "Allow" +// principals { +// type = "Service" +// identifiers = ["apigateway.amazonaws.com"] +// } +// } +//} +//resource "aws_iam_policy" "s3_full_access_policy" { +// 
policy = data.aws_iam_policy_document.s3_full_access_doc.json +// name = join("-",[var.stack_name,"s3-full-access"]) +// description = "api s3 read access" +//} +//resource "aws_iam_role" "api_iam_role" { +// name = "${var.stack_name}-api-gateway-role" +// path = "/" +// assume_role_policy = data.aws_iam_policy_document.api_gateway_policy.json +//} +// +//resource "aws_iam_role_policy_attachment" "api-gateway-attach" { +// role = aws_iam_role.api_iam_role.name +// policy_arn = aws_iam_policy.s3_full_access_policy.arn +// +//} \ No newline at end of file diff --git a/terraform/modules/static-site/api-gateway/outputs.tf b/terraform/modules/static-site/api-gateway/outputs.tf new file mode 100644 index 000000000..cffe91150 --- /dev/null +++ b/terraform/modules/static-site/api-gateway/outputs.tf @@ -0,0 +1,3 @@ +output "api_domain_name" { + value = aws_api_gateway_domain_name.domain.domain_name +} diff --git a/terraform/modules/static-site/api-gateway/variables.tf b/terraform/modules/static-site/api-gateway/variables.tf new file mode 100644 index 000000000..78a3df0cf --- /dev/null +++ b/terraform/modules/static-site/api-gateway/variables.tf @@ -0,0 +1,36 @@ +variable "api_gateway_name" { + description = "name of the api gateway" + type = string +} +variable "s3_uri" { + description = "specify s3 uri" + type = string +} +variable "api_stage_name" { + description = "specify the name of api deployment stage" + type = string +} +variable "domain_name" { + description = "specify the domain name to use for this resource" + type = string +} +variable "certificate_arn" { + description = "certificate arn to use" + type = string +} +variable "tags" { + description = "tags for the vpc" + type = map(string) + default = {} +} +variable "stack_name" { + description = "name of the project" + type = string +} +variable "region" { + description = "aws region to use for resources" + type = string +} +variable "alb_dns_name" { + description = "alb dns name" +} \ No newline at end of file 
diff --git a/terraform/modules/static-site/nlb-api-gateway/main.tf b/terraform/modules/static-site/nlb-api-gateway/main.tf new file mode 100644 index 000000000..1bb54f490 --- /dev/null +++ b/terraform/modules/static-site/nlb-api-gateway/main.tf @@ -0,0 +1,81 @@ +resource "aws_api_gateway_vpc_link" "vpc_link" { + name = "${var.stack_name}-api-vpc-link" + description = "public api gateway to private network loadbalancer" + target_arns = var.target_arns +} + +resource "aws_api_gateway_rest_api" "api" { + name = var.api_gateway_name + endpoint_configuration { + types = var.endpoint_configuration + } +} + +resource "aws_api_gateway_resource" "api_resource" { + rest_api_id = aws_api_gateway_rest_api.api.id + parent_id = aws_api_gateway_rest_api.api.root_resource_id + path_part = "api" +} + +resource "aws_api_gateway_method" "api_method" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = "ANY" + authorization = "NONE" + + request_parameters = { + "method.request.path.proxy" = true + } +} + +resource "aws_api_gateway_integration" "api_intergration" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = aws_api_gateway_method.api_method.http_method + + type = "HTTP_PROXY" + integration_http_method = "ANY" + uri = "http://${var.nlb_dns_name}/" + connection_type = "VPC_LINK" + connection_id = aws_api_gateway_vpc_link.vpc_link.id + timeout_milliseconds = 29000 + + cache_key_parameters = ["method.request.path.proxy"] + request_parameters = { + "integration.request.path.proxy" = "method.request.path.proxy" + } +} + +resource "aws_api_gateway_method_response" "status_ok" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = aws_api_gateway_method.api_method.http_method + status_code = "200" +} + +resource "aws_api_gateway_integration_response" "status_ok_intergration" { + 
rest_api_id = aws_api_gateway_rest_api.api.id
+  resource_id = aws_api_gateway_resource.api_resource.id
+  http_method = aws_api_gateway_method.api_method.http_method
+  status_code = aws_api_gateway_method_response.status_ok.status_code
+
+//  response_templates = {
+//    "application/json" = ""
+//  }
+}
+
+resource "aws_api_gateway_domain_name" "domain" {
+  domain_name     = var.domain_name
+  certificate_arn = var.certificate_arn
+}
+
+resource "aws_api_gateway_base_path_mapping" "base_path_map" {
+  api_id      = aws_api_gateway_rest_api.api.id
+  domain_name = aws_api_gateway_domain_name.domain.domain_name
+  stage_name  = aws_api_gateway_deployment.deployment.stage_name
+}
+
+resource "aws_api_gateway_deployment" "deployment" {
+  rest_api_id = aws_api_gateway_rest_api.api.id
+  stage_name  = var.api_stage_name
+}
diff --git a/terraform/modules/static-site/nlb-api-gateway/outputs.tf b/terraform/modules/static-site/nlb-api-gateway/outputs.tf
new file mode 100644
index 000000000..0fed91978
--- /dev/null
+++ b/terraform/modules/static-site/nlb-api-gateway/outputs.tf
@@ -0,0 +1,3 @@
+output "api_gateway_endpoint" {
+  value = "https://${aws_api_gateway_domain_name.domain.domain_name}"
+}
\ No newline at end of file
diff --git a/terraform/modules/static-site/nlb-api-gateway/variables.tf b/terraform/modules/static-site/nlb-api-gateway/variables.tf
new file mode 100644
index 000000000..a5a38daf2
--- /dev/null
+++ b/terraform/modules/static-site/nlb-api-gateway/variables.tf
@@ -0,0 +1,42 @@
+variable "tags" {
+  description = "tags to label this ALB"
+  type        = map(string)
+  default     = {}
+}
+variable "stack_name" {
+  description = "Name of the project"
+  type        = string
+}
+variable "target_arns" {
+  description = "specify target group arns"
+  type        = list(string)
+}
+variable "endpoint_configuration" {
+  description = "specify the types of api endpoint configuration "
+  type        = list(string)
+  default     = ["EDGE"]
+}
+variable "api_gateway_name" {
+  description = "name of the api gateway"
+  type = 
string +} +variable "s3_uri" { + description = "specify s3 uri" + type = string +} +variable "api_stage_name" { + description = "specify the name of api deployment stage" + type = string +} +variable "domain_name" { + description = "specify the domain name to use for this resource" + type = string +} +variable "certificate_arn" { + description = "certificate arn to use" + type = string +} + +variable "nlb_dns_name" { + description = "alb dns name" +} \ No newline at end of file diff --git a/terraform/modules/static-site/s3/main.tf b/terraform/modules/static-site/s3/main.tf new file mode 100644 index 000000000..217a7fde0 --- /dev/null +++ b/terraform/modules/static-site/s3/main.tf @@ -0,0 +1,96 @@ +#create s3 bucket to host our site +resource "aws_s3_bucket" "s3_website" { + bucket = join(".",[var.app,var.domain]) + acl = "public-read" + force_destroy = true + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["HEAD", "GET", "POST"] + expose_headers = ["ETag"] + max_age_seconds = "0" + allowed_origins = ["*"] + } + website { + index_document = var.index_document + } + + logging { + target_bucket = aws_s3_bucket.website_logs.id + } + versioning { + enabled = true + } + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,var.app,"website") + }, + var.tags, + ) +} + + +#create a bucket to redirect http to https +resource "aws_s3_bucket" "redirect_http_https" { + bucket = join(".",["www",var.app,var.domain]) + + website { + redirect_all_requests_to = join("",["https://",var.app,".",var.domain]) + } + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,var.app,"redirect-website") + }, + var.tags, + ) +} + +#create s3 bucket to host website logs +resource "aws_s3_bucket" "website_logs" { + bucket = join("-",[var.domain,"logs"]) + acl = "log-delivery-write" + tags = merge( + { + "Name" = format("%s-%s-%s",var.stack_name,var.app,"logs") + }, + var.tags, + ) +} + + +#create policy document +data "aws_iam_policy_document" "public_read_policy" { + 
statement { + sid = "publicRead" + actions = ["s3:GetObject"] + resources = [ join("",[aws_s3_bucket.s3_website.arn,"/*"])] + principals { + type = "AWS" + identifiers = ["*"] + } + } + +} + +#create policy document for redirect bucket +data "aws_iam_policy_document" "public_read_redirect_policy" { + statement { + sid = "publicReadRedirect" + actions = ["s3:GetObject"] + resources = [ join("",[aws_s3_bucket.redirect_http_https.arn,"/*"])] + principals { + type = "*" + identifiers = ["*"] + } + } +} +#create s3 bucket policy +resource "aws_s3_bucket_policy" "s3_public_read" { + bucket = aws_s3_bucket.s3_website.id + policy = data.aws_iam_policy_document.public_read_policy.json + +} +resource "aws_s3_bucket_policy" "redirect_read_policy" { + bucket = aws_s3_bucket.redirect_http_https.id + policy = data.aws_iam_policy_document.public_read_redirect_policy.json +} \ No newline at end of file diff --git a/terraform/modules/static-site/s3/outputs.tf b/terraform/modules/static-site/s3/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/terraform/modules/static-site/s3/variables.tf b/terraform/modules/static-site/s3/variables.tf new file mode 100644 index 000000000..e85883583 --- /dev/null +++ b/terraform/modules/static-site/s3/variables.tf @@ -0,0 +1,25 @@ +variable index_document { + description = "name of the home page" + default = "index.html" +} +variable "error_document" { + description = "name of the error document" + default = "access_log" +} +variable app { + description = "name of the static website" + default = "" + type = string +} +variable "tags" { + description = "tags for the vpc" + type = map(string) + default = {} +} +variable "domain" { + description = "domain name for the website" +} +variable "stack_name" { + description = "name of the project" + type = string +} \ No newline at end of file diff --git a/terraform/monitoring/new-relic/ppdc/infra-monitoring-database.tf b/terraform/monitoring/new-relic/ppdc/infra-monitoring-database.tf 
new file mode 100644 index 000000000..21d2a11ee --- /dev/null +++ b/terraform/monitoring/new-relic/ppdc/infra-monitoring-database.tf @@ -0,0 +1,116 @@ +/*module "infrastructure" { + source = "../../../modules/new-relic/infrastructure" +} +*/ +/* +# Creates an email alert channel. +resource "newrelic_alert_channel" "email_channel" { + name = var.email_channel + type = "email" + + config { + recipients = var.recipients #"foo@example.com" # need to check for the alias + include_json_attachment = "1" + } +} + +# Creates a Slack alert channel. +resource "newrelic_alert_channel" "slack_channel" { + name = var.slack_channel + type = "slack" + + config { + channel = var.slack_channel_name + url = var.slack_url + } +} +*/ +resource "newrelic_alert_policy" "alert_policy_disk_utilization_db" { + name = "${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-DB-disk-utilization" #var.alert_policy_disk_utilization_name #"ppdc_disk_utilization" + incident_preference = var.incident_preference + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_alert_policy" "alert_policy_cpu_usage_db" { + name = "${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-DB-cpu-usage" #var.alert_policy_cpu_usage_name #"ppdc_disk_utilization" + incident_preference = var.incident_preference + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_alert_policy" "alert_policy_host_reporting_db" { + name = "${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-DB-host-not-reporting" #"ppdc_disk_utilization" + incident_preference = var.incident_preference + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_infra_alert_condition" "high_disk_usage_db" { + policy_id = newrelic_alert_policy.alert_policy_disk_utilization_db.id + + name = "High disk usage" 
+  description = "Warning if disk usage goes above 80% and critical alert if goes above 90%"
+  type        = "infra_metric"
+  event       = "StorageSample"
+  select      = "diskUsedPercent"
+  comparison  = "above"
+  where       = "(hostname LIKE '%${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-database%')"
+
+  critical {
+    duration      = 25
+    value         = 90
+    time_function = "all"
+  }
+
+  warning {
+    duration      = 10
+    value         = 80
+    time_function = "all"
+  }
+}
+
+resource "newrelic_infra_alert_condition" "cpu_percent_utilization_db" {
+  policy_id = newrelic_alert_policy.alert_policy_cpu_usage_db.id
+
+  name        = "CPU usage"
+  description = "Warning if CPU usage drops below 20% and critical alert if it drops below 10%"
+  type        = "infra_metric"
+  event       = "SystemSample"
+  select      = "cpuPercent"
+  comparison  = "below"
+  where       = "(hostname LIKE '%${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-database%')"
+
+  critical {
+    duration      = 2
+    value         = 10
+    time_function = "all"
+  }
+
+  warning {
+    duration      = 2
+    value         = 20
+    time_function = "all"
+  }
+}
+
+resource "newrelic_infra_alert_condition" "host_not_reporting_db" {
+  policy_id = newrelic_alert_policy.alert_policy_host_reporting_db.id
+
+  name        = "Host not reporting"
+  description = "Critical alert when the host is not reporting"
+  type        = "infra_host_not_reporting"
+  where       = "(hostname LIKE '%${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-database%')"
+
+  critical {
+    duration = 5
+  }
+}
+
+
diff --git a/terraform/monitoring/new-relic/ppdc/infra-monitoring-frontend.tf b/terraform/monitoring/new-relic/ppdc/infra-monitoring-frontend.tf
new file mode 100644
index 000000000..d64ae80eb
--- /dev/null
+++ b/terraform/monitoring/new-relic/ppdc/infra-monitoring-frontend.tf
@@ -0,0 +1,118 @@
+/*module "infrastructure" {
+  source = "../../../modules/new-relic/infrastructure"
+}
+*/
+# Creates an email alert channel.
+resource "newrelic_alert_channel" "email_channel" { + name = var.email_channel + type = "email" + + config { + recipients = var.recipients #"foo@example.com" # need to check for the alias + include_json_attachment = "1" + } +} + +# Creates a Slack alert channel. +resource "newrelic_alert_channel" "slack_channel" { + name = var.slack_channel + type = "slack" + + config { + channel = var.slack_channel_name + url = var.slack_url + } +} + + +resource "newrelic_alert_policy" "alert_policy_disk_utilization" { + name = "${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-disk-utilization" #var.alert_policy_disk_utilization_name #"ppdc_disk_utilization" + incident_preference = var.incident_preference + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_alert_policy" "alert_policy_cpu_usage" { + name = "${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-cpu-usage" #var.alert_policy_cpu_usage_name #"ppdc_disk_utilization" + incident_preference = var.incident_preference + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_alert_policy" "alert_policy_host_reporting" { + name = "${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}-host-not-reporting" #"ppdc_disk_utilization" + incident_preference = var.incident_preference + channel_ids = [ + newrelic_alert_channel.email_channel.id, + newrelic_alert_channel.slack_channel.id + ] +} + +resource "newrelic_infra_alert_condition" "high_disk_usage" { + policy_id = newrelic_alert_policy.alert_policy_disk_utilization.id + + name = "High disk usage" + description = "Warning if disk usage goes above 80% and critical alert if goes above 90%" + type = "infra_metric" + event = "StorageSample" + select = "diskUsedPercent" + comparison = "above" + where = "(hostname LIKE '%${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}%')" + + + critical { 
+    duration      = 25
+    value         = 90
+    time_function = "all"
+  }
+
+  warning {
+    duration      = 10
+    value         = 80
+    time_function = "all"
+  }
+}
+
+resource "newrelic_infra_alert_condition" "cpu_percent_utilization" {
+  policy_id = newrelic_alert_policy.alert_policy_cpu_usage.id
+
+  name        = "CPU Usage"
+  description = "Warning if CPU Utilization goes low"
+  type        = "infra_metric"
+  event       = "SystemSample"
+  select      = "cpuPercent"
+  comparison  = "below"
+  where       = "(hostname LIKE '%${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}%')"
+
+
+  critical {
+    duration      = 2
+    value         = 10
+    time_function = "all"
+  }
+
+  warning {
+    duration      = 2
+    value         = 20
+    time_function = "all"
+  }
+}
+
+resource "newrelic_infra_alert_condition" "host_not_reporting" {
+  policy_id = newrelic_alert_policy.alert_policy_host_reporting.id
+
+  name        = "Host not reporting"
+  description = "Critical alert when the host is not reporting"
+  type        = "infra_host_not_reporting"
+  where       = "(hostname LIKE '%${var.stack_name}-${var.frontend_app_name}-frontend-${var.env}%')"
+
+  critical {
+    duration = 5
+  }
+}
+
+
diff --git a/terraform/monitoring/new-relic/ppdc/main.tf b/terraform/monitoring/new-relic/ppdc/main.tf
new file mode 100644
index 000000000..7f38cf42b
--- /dev/null
+++ b/terraform/monitoring/new-relic/ppdc/main.tf
@@ -0,0 +1,36 @@
+# Configure terraform
+/*terraform {
+  required_version = "~> 0.13.0"
+  required_providers {
+    newrelic = {
+      source  = "terraform-providers/newrelic"
+      version = "~> 2.21.0"
+    }
+  }
+}*/
+terraform {
+  required_version = ">= 0.13"
+}
+terraform {
+  required_providers {
+    newrelic = {
+      source  = "newrelic/newrelic"
+      version = "2.21.0"
+    }
+  }
+}
+
+# Configure the New Relic provider, values added to environment variable.
+provider "newrelic" {} + +terraform { + backend "s3" { + bucket = "bento-terraform-remote-state" + key = "bento/ppdc/terraform.tfstate" + workspace_key_prefix = "env" + region = "us-east-1" + encrypt = true + } +} + + diff --git a/terraform/monitoring/new-relic/ppdc/terraform.tfvars b/terraform/monitoring/new-relic/ppdc/terraform.tfvars new file mode 100644 index 000000000..3bb2242ea --- /dev/null +++ b/terraform/monitoring/new-relic/ppdc/terraform.tfvars @@ -0,0 +1,14 @@ + +#alert_policy_name = "ppdc_disk_utilization" +alert_policy_disk_utilization_name = "ppdc_disk_utilization" +alert_policy_cpu_usage_name = "ppdc_cpu_usage" +alert_policy_host_reporting_name = "ppdc_host_not_reporting" +incident_preference = "PER_CONDITION" +email_channel= "ppdc_email_channel" +recipients= "karavadis2@nih.gov" +slack_channel= "ppdc-monitoring" +slack_channel_name = "ppdc-monitoring" +slack_url= "https://join.slack.com/share/zt-tcihyl3b-N7orX2KeHwZZU_5fW1kN1g" +stack_name = "ppdc" +frontend_app_name = "otp" +env = "dev" diff --git a/terraform/monitoring/new-relic/ppdc/variables.tf b/terraform/monitoring/new-relic/ppdc/variables.tf new file mode 100644 index 000000000..e8639cacd --- /dev/null +++ b/terraform/monitoring/new-relic/ppdc/variables.tf @@ -0,0 +1,53 @@ +variable "alert_policy_name" { + default = "" + type = string +} +variable "incident_preference" { + default = "" + type = string +} +variable "email_channel" { + type = string + default = "" +} +variable "recipients" { + type = string + default = "" +} +variable "slack_channel" { + type = string + default = "" +} +variable "slack_channel_name" { + type = string + default = "" +} +variable "slack_url" { + type = string + default = "" +} +variable "host_condition" { + type = string + default = "" +} +variable "alert_policy_disk_utilization_name" { + default = "" +} +variable "alert_policy_cpu_usage_name" { + default = "" +} +variable "alert_policy_host_reporting_name" { + default = "" +} +variable 
"host_condition_database" { + default = "" +} +variable "stack_name" { + default = "" +} +variable "frontend_app_name" { + default = "" +} +variable "env" { + default = "" +} \ No newline at end of file