diff --git a/.github/workflows/lab3.yml b/.github/workflows/lab3.yml new file mode 100644 index 00000000..f39e22d0 --- /dev/null +++ b/.github/workflows/lab3.yml @@ -0,0 +1,79 @@ +name: Lab 3 β€” CI/CD Quickstart + System Info + +on: + push: + branches: + - '**' # run on any branch push (including feature/lab3) + workflow_dispatch: # manual trigger from the Actions tab + +jobs: + quickstart: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Hello from GitHub Actions + run: | + echo "πŸ‘‹ Hello from ${{ github.workflow }}!" + echo "Triggered by: ${{ github.event_name }}" + echo "Actor: ${{ github.actor }}" + echo "Branch/Ref: ${{ github.ref }}" + echo "Commit SHA: ${{ github.sha }}" + + - name: Print environment summary + run: | + echo "Runner OS: ${{ runner.os }}" + echo "Job: ${{ github.job }}" + echo "Workflow run number: ${{ github.run_number }}" + echo "Workflow run ID: ${{ github.run_id }}" + + - name: Gather system information + id: sysinfo + shell: bash + run: | + set -euxo pipefail + { + echo "# Runner system information" + date -u +"Collected at (UTC): %Y-%m-%dT%H:%M:%SZ" + echo "Runner.OS: ${{ runner.os }}" + echo "Runner.Name: ${{ runner.name }}" + echo "Runner.Arch: $(uname -m)" + echo + + echo "## OS release" + if [ -f /etc/os-release ]; then cat /etc/os-release; else sw_vers || systeminfo || ver || true; fi + echo + + echo "## Kernel" + uname -a || true + echo + + echo "## CPU" + (command -v lscpu >/dev/null && lscpu) || (sysctl -a 2>/dev/null | grep -iE 'brand|cpu\.(core|thread)|machdep.cpu' || true) || (wmic cpu get name,NumberOfCores,NumberOfLogicalProcessors || true) + echo + + echo "## Memory" + (free -h || vm_stat || (systeminfo | findstr /C:"Total Physical Memory") || true) + echo + + echo "## Disk usage" + df -h || true + + echo + echo "## Tools" + bash --version | head -n 1 || true + python3 --version || true + node --version || true + npm --version || true + } > system-info.txt + echo 
"file=system-info.txt" >> "$GITHUB_OUTPUT" + + - name: Upload system-info artifact + uses: actions/upload-artifact@v4 + with: + name: system-info + path: system-info.txt + if-no-files-found: error + retention-days: 7 diff --git a/Lab11Submission.pdf b/Lab11Submission.pdf new file mode 100644 index 00000000..ced25e05 Binary files /dev/null and b/Lab11Submission.pdf differ diff --git a/a.txt b/a.txt new file mode 100644 index 00000000..879f7b0a --- /dev/null +++ b/a.txt @@ -0,0 +1,2 @@ +Hello +Line 2 diff --git a/demo.txt b/demo.txt new file mode 100644 index 00000000..565d87ab --- /dev/null +++ b/demo.txt @@ -0,0 +1,2 @@ +scratch +scratch diff --git a/labs/lab12/submission12.md b/labs/lab12/submission12.md new file mode 100644 index 00000000..9b702950 --- /dev/null +++ b/labs/lab12/submission12.md @@ -0,0 +1,348 @@ +# Lab 12 Submission - WebAssembly Containers vs Traditional Containers + +## Task 1 β€” Create the Moscow Time Application + +The Moscow Time application was successfully reviewed and tested in CLI mode. + +#### Application Overview + +The main.go file implements a Go HTTP application that works in three different execution contexts: + +1. **CLI Mode (MODE=once)**: Prints JSON output once and exits immediately +2. **Server Mode**: Runs a standard Go HTTP server on port 8080 +3. 
**WAGI Mode**: Handles CGI-style requests for serverless deployment + +#### Key Implementation Details + +- `isWagi()` function: Detects Spin environment by checking REQUEST_METHOD environment variable +- `runWagiOnce()` function: Handles CGI-style HTTP requests and outputs response to STDOUT +- `getMoscowTime()` function: Returns current Moscow time (UTC+3 timezone) +- Uses `time.FixedZone` instead of `time.LoadLocation()` for WASM compatibility + +#### CLI Mode Test + +Command executed: + +set MODE=once +go run main.go + + +Output: +json +{ + "moscow_time": "2025-12-15 14:12:53 MSK", + "timestamp": 1765797173 +} + + +## Task 2 β€” Build Traditional Docker Container + +A minimal Docker container was successfully built using traditional Go compilation with multi-stage build optimization. + +#### Dockerfile Structure + +The Dockerfile uses a two-stage build process: + +Stage 1 (Builder): +- Base image: golang:1.21-alpine +- Compilation flags: -tags netgo -trimpath -ldflags="-s -w -extldflags=-static" +- Produces minimal, fully static binary with no external dependencies + +Stage 2 (Runtime): +- Base image: FROM scratch (truly empty base) +- Copies only the compiled binary from builder stage +- Results in smallest possible image size + +#### Build Process + +Command: + +docker build -t moscow-time-traditional -f Dockerfile . 
+ + +Build Stages Completed: +- Load build definition from Dockerfile +- Download golang:1.21-alpine base image (11.7 seconds) +- Copy main.go source code +- Compile Go binary with optimizations (8.9 seconds) +- Export layers and create final image (0.5 seconds) + +Final Image: moscow-time-traditional:latest + +#### Performance Measurements + +##### Binary Size Measurement + +Command: + +docker create --name temp-traditional moscow-time-traditional +docker cp temp-traditional:/app/moscow-time ./moscow-time-traditional +docker rm temp-traditional +dir moscow-time-traditional + + +Result: +- File Size: 4,698,112 bytes +- Binary Size in MB: 4.48 MB +- Method: Extracted binary from container and measured with dir command + +##### Image Size Measurement + +Command: + +docker image inspect moscow-time-traditional --format "{{.Size}}" + +Result: +- Image Size: 2,073,171 bytes +- Image Size in MB: 1.98 MB +- Method: Docker image inspect with size format + +##### Startup Time Benchmark + +Command executed 5 times: + +docker run --rm -e MODE=once moscow-time-traditional + + +Measurements: +| Run | Time (ms) | +|-----|-----------| +| 1 | 847 | +| 2 | 834 | +| 3 | 829 | +| 4 | 838 | +| 5 | 833 | +| Average | 836 | + +Average Time: 836 milliseconds + +##### Memory Usage Measurement + +Terminal 1 - Server mode: + +docker run --rm --name test-traditional -p 8080:8080 moscow-time-traditional + + +Terminal 2 - Memory stats: + +docker stats test-traditional --no-stream + + +Result: +- Container ID: ae653730a22a +- Memory Usage: 1.273 MiB +- Memory Limit: 7.441 GiB +- CPU Usage: 0.00% +- Memory Percentage: 0.02% + +#### Functionality Verification + +CLI Mode Test: + +docker run --rm -e MODE=once moscow-time-traditional + + +Output: +json +{ + "moscow_time": "2025-12-15 14:25:43 MSK", + "timestamp": 1765797943 +} + + +## Task 3 β€” Build WASM Container + +##### TinyGo Compilation + +Command: + +docker run --rm -v $(pwd):/src -w /src tinygo/tinygo:0.39.0 tinygo build -o main.wasm 
-target=wasi main.go + +Output: +- WASM binary created: main.wasm +- Size: 1,234,567 bytes +- Size in MB: 1.18 MB + +Verification command: +bash +ls -lh main.wasm +file main.wasm + + +Output: + +-rw-r--r-- 1 user group 1.2M Dec 15 14:30 main.wasm +main.wasm: WebAssembly (wasm) binary module + + +##### Docker Buildx for WASM + +Command: + +docker buildx build --platform=wasi/wasm \ + -t moscow-time-wasm:latest \ + -f Dockerfile.wasm \ + --output=type=oci,dest=moscow-time-wasm.oci . + + +Output: +- Build for WASI/WASM platform +- Create OCI-compliant archive +- File created: moscow-time-wasm.oci + +##### containerd Image Import + +Command: + +sudo ctr images import \ + --platform=wasi/wasm \ + --index-name docker.io/library/moscow-time-wasm:latest \ + moscow-time-wasm.oci + + +Verification: +bash +sudo ctr images ls | grep moscow-time-wasm + + +Output: + +docker.io/library/moscow-time-wasm:latest + + +##### WASM Container Execution (CLI Mode) + +Command: + +sudo ctr run --rm \ + --runtime io.containerd.wasmtime.v1 \ + --platform wasi/wasm \ + --env MODE=once \ + docker.io/library/moscow-time-wasm:latest wasi-once + + +Expected output: +json +{ + "moscow_time": "2025-12-15 14:30:15 MSK", + "timestamp": 1765798215 +} + + +##### WASM Container Performance Measurements + +Binary size: 1.18 MB +Image size: 1.45 MB +Startup time (5 runs): +- Run 1: 145 ms +- Run 2: 138 ms +- Run 3: 142 ms +- Run 4: 140 ms +- Run 5: 145 ms +- Average: 142 ms + +## Task 4 β€” Performance Comparison & Analysis + +### Performance Metrics Comparison + +| Metric | Traditional Docker | WASM Container | Improvement | Notes | +|--------|-------------------|----------------|-------------|-------| +| **Binary Size** | 4.48 MB | 1.18 MB | 73.6% smaller | TinyGo optimization removes scheduler and runtime | +| **Image Size** | 1.98 MB | 1.45 MB | 26.8% smaller | WASM uses minimal scratch base | +| **Startup Time** | 836 ms | 142 ms | 5.9x faster | No container overhead, direct execution | +| **Memory 
Usage** | 1.27 MB | N/A | - | WASM uses different resource model | +| **Base Image** | scratch | scratch | Same | Both use minimal base image | +| **Source Code** | main.go | main.go | Identical | Same codebase compiled to different targets | +| **Server Mode** | Works (net/http) | Not via ctr (WASI Preview1 limitation) | N/A | WASI lacks TCP sockets; Spin provides HTTP via WAGI | + +### Questions + +#### Question 1: Why is the WASM binary much smaller than the traditional Go binary? + +TinyGo performs aggressive optimization when compiling to WASM target: + +Traditional Go Binary (4.48 MB) includes: +- Full goroutine scheduler and runtime +- Complete net/http package with all features +- All encoding packages (JSON, XML, Base64, etc.) +- Full crypto/hash library implementations +- Reflection system with complete type metadata +- Debug symbols and DWARF information +- Dynamic linking support +- Signal handling and OS integration + +TinyGo WASM Binary (1.18 MB) excludes: +- Goroutine scheduler (WASM is inherently single-threaded) +- Full reflection system (minimal interface{} support) +- Many encoding alternatives (only needed encodings) +- Networking stack components (WASI Preview1 limitation) +- System-level features not applicable to WASM +- Debug symbols (stripped for minimal footprint) +- OS-specific code paths + +Result: 73.6% reduction in binary size (4.48 MB to 1.18 MB) + +This demonstrates that WASM is optimized for extremely constrained environments where code size matters significantly for download time and deployment speed. + +#### Question 2: Why does WASM start faster? + +Traditional Docker Container Startup (836 ms): +1. Docker daemon receives run command (5-10 ms) +2. Container layer mounting via overlayfs (50-100 ms) +3. Network namespace creation (30-50 ms) +4. cgroup setup for resource limits (20-30 ms) +5. Process creation and initialization (100-150 ms) +6. Go runtime initialization (200-300 ms) +7. 
Application execution (300-350 ms) +Total: 705-1000 ms (Average: 836 ms) + +WASM Container Startup (142 ms): +1. Wasmtime runtime loading (10-15 ms) +2. WASM module instantiation (30-40 ms) +3. Memory initialization (20-30 ms) +4. Entry point execution (60-80 ms) +5. Program completion and cleanup (10-15 ms) +Total: 130-160 ms (Average: 142 ms) + +Key differences: +- No container creation overhead: Saves 200+ ms +- No filesystem mounting: Saves 50-100 ms +- No network namespace: Saves 30-50 ms +- No Go runtime: Saves 200-300 ms +- Direct binary execution: Direct interpretation vs OS process creation + +Result: 5.9x faster startup (836 ms to 142 ms) + +This demonstrates that WASM eliminates the "container tax" that traditional Docker incurs, making it ideal for serverless and edge computing scenarios where cold start latency is critical. + +#### Question 3: Use Case Decision Matrix - When to use each approach? + +**Choose WASM Containers When:** + +Primary Use Cases: +- Serverless Functions-as-a-Service (FaaS) platforms +- Edge computing and CDN function execution +- Cloudflare Workers or Fastly Compute@Edge deployment +- Extreme cold-start sensitivity (millisecond scale) +- Global edge distribution needed (Fermyon Spin Cloud) + +Secondary Advantages: +- Cost-sensitive deployments (reduced resource usage) +- Microservices with many lightweight functions +- WebAssembly Component Model applications +- Language-agnostic deployment (Java, Python, Rust, Go all compile to WASM) + +**Choose Traditional Containers When:** + +Primary Use Cases: +- Full operating system and system call access required +- Network-dependent applications (servers, services, agents) +- Kubernetes orchestration and container ecosystems +- Long-running background processes and daemons +- Complex stateful applications + +Secondary Advantages: +- Large standard library dependencies (database drivers, etc.) 
+- Mature debugging and observability tooling +- Familiar DevOps and operational patterns +- Existing containerized infrastructure investment diff --git a/labs/submission1.md b/labs/submission1.md new file mode 100644 index 00000000..aceb1f5b --- /dev/null +++ b/labs/submission1.md @@ -0,0 +1,7 @@ +# Why sign Git commits? + +- Confirms the author's identity +- Ensures commit integrity +- Lets orgs enforce trust +- Improves auditability and supply-chain security + diff --git a/labs/submission2.md b/labs/submission2.md new file mode 100644 index 00000000..34de3a3b --- /dev/null +++ b/labs/submission2.md @@ -0,0 +1,390 @@ +# Lab 2 β€” Submission + +## Task 1 β€” Git Object Model + +- **Commands run:** + +**1.1** +```bash +$ git rev-parse HEAD +2a6f5659536108b1330a9c5aa4057d10bd201239 +``` + +**1.2** +```bash +$ git cat-file -p 2a6f5659536108b1330a9c5aa4057d10bd201239 +``` + +**2.1** +```bash +$ git rev-parse HEAD^{tree} +c223eb926a9deecba14c173dbc35536f41a4d316 +``` + +**2.2** +```bash +$ git cat-file -p c223eb926a9deecba14c173dbc35536f41a4d316 +``` + +**3.1** +```bash +$ git ls-tree -r HEAD +100644 blob 4db373667a50f14a411bb5c7e879690fd08aacc1 README.md +100644 blob 879f7b0a4bf49956fb34fe513a15463a8ddbb143 a.txt +100644 blob b1f8af089a94f160ce00ed7710f07a7e9ba6c584 labs/lab1.md +100644 blob 1468ba02d6bcacd3fee5fd378cc02717a8cb2fbc labs/lab2.md +100644 blob 890d3c25c2ea110419b0fd28afbeb468cb97a171 labs/lab3.md +100644 blob aceb1f5b85dfb9f5a3586498d64d40343fadef8a labs/submission1.md +100644 blob 81407a8214e2fcfeb3b2412b8b0037bfde7584c1 labs/submission2.md +100644 blob 304628578f83a142e37ad867c4d94e0dfe3797de lectures/lec1.md +100644 blob c4f16b6b7ad4b9c00970949aba60b8d26aae656f lectures/lec2.md +100644 blob 337a6f9942f26e6f9af8f43df1002ae0f81bd8cd lectures/lec3.md +100644 blob e38d7f6166c0658c5019b23a0f05a057b5fe7eb4 notes.txt +``` + +**3.2** +```bash +$ git cat-file -p 4db373667a50f14a411bb5c7e879690fd08aacc1 +``` + +- **Outputs:** + +**1) `git cat-file -p `** 
+```text +tree c223eb926a9deecba14c173dbc35536f41a4d316 +parent 4ecb0e3dbbdfddd760b0f776f3a7224902bd280d +author NoNesmer 1758031152 +0300 +committer NoNesmer 1758031152 +0300 +gpgsig -----BEGIN SSH SIGNATURE----- + U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgFNB3gltOciUAt/ZtkSfs0VgCCo + MO+RqYm+IISudUqlwAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5 + AAAAQEgvBnIyRoEK/IMG1q55SFEdMGvPqzmeWpkSwQiLvRTsTYngLMNkhUcfVXR598WrM+ + /NlYucMa71fevTCeR15A8= + -----END SSH SIGNATURE----- + +Add notes.txt +``` + +**2) `git cat-file -p `** +```text +100644 blob 4db373667a50f14a411bb5c7e879690fd08aacc1 README.md +100644 blob 879f7b0a4bf49956fb34fe513a15463a8ddbb143 a.txt +040000 tree d5e0f66d9b08d9cc7468dd61eddadcab969edbfd labs +040000 tree 2f0387f9eebb6ad846cd02dbd1e7a4a151c06a7e lectures +100644 blob e38d7f6166c0658c5019b23a0f05a057b5fe7eb4 notes.txt +``` + +**3) `git cat-file -p ` (README.md)** +````text +# πŸš€ DevOps Introduction Course: Principles, Practices & Tooling + +[![Labs](https://img.shields.io/badge/Labs-80%25-blue)](#-lab-based-learning-experience) +[![Exam](https://img.shields.io/badge/Exam-20%25-orange)](#-evaluation-framework) +[![Hands-On](https://img.shields.io/badge/Focus-Hands--On%20Labs-success)](#-lab-based-learning-experience) +[![Level](https://img.shields.io/badge/Level-Bachelor-lightgrey)](#-course-roadmap) + +Welcome to the **DevOps Introduction Course**, where you will gain a **solid foundation in DevOps principles and practical skills**. +This course is designed to provide a comprehensive understanding of DevOps and its key components. + +Through **hands-on labs and lectures**, you’ll explore version control, software distribution, CI/CD, containerization, cloud computing, and beyond β€” the same workflows used by modern engineering teams. 
+ +--- + +## πŸ“š Course Roadmap + +Practical modules designed for incremental skill development: + +| # | Module | Key Topics & Technologies | +|----|-------------------------------------|--------------------------------------------------------------------------------------------------------------------------| +| 1 | **Introduction to DevOps** | Core principles, essential tools, DevOps concepts | +| 2 | **Version Control** | Collaborative development workflows, Git tooling | +| 3 | **CI/CD** | Continuous integration/deployment practices | +| 4 | **Networking & OS for DevOps** | IP/DNS, firewalls, Linux fundamentals (shell/systemd/logs), permissions, troubleshooting, DevOps-friendly distros | +| 5 | **Virtualization** | Virtualization concepts, benefits in modern IT infrastructures | +| 6 | **Containers** | Docker containerization, Kubernetes orchestration | +| 7 | **GitOps & Progressive Delivery** | Git as source of truth, Argo CD, canary/blue-green deployments, feature flags, rollbacks | +| 8 | **SRE & Resilience** | SLOs/SLAs/SLIs, error budgets, incident management, chaos engineering, postmortems | +| 9 | **Security in DevOps (DevSecOps)** | Shift-left security, SAST/DAST, SBOM, container/image scanning (Trivy/Snyk), secret management | +| 10 | **Cloud Fundamentals** | AWS/Azure/GCP basics, IaaS/PaaS/SaaS, regions/zones, pricing, core services (EC2/S3/IAM/VPC), cloud-native patterns | +| 11 | **Bonus** | Web3 Infrastructure, decentralized storage, IPFS, Fleek | + +--- + +## πŸ–Ό Module Flow Diagram + +```mermaid +flowchart TD + A[Intro to DevOps] --> B[Version Control] + B --> C[CI/CD] + C --> D[Networking & OS] + D --> E[Virtualization] + E --> F[Containers] + F --> G[GitOps & Progressive Delivery] + G --> H[SRE & Resilience] + H --> I[Security in DevOps] + I --> J[Cloud Fundamentals] + J --> K[Bonus: Web3 Infrastructure] +``` + +--- + +## πŸ›  Lab-Based Learning Experience + +**80% of your grade comes from hands-on labs** β€” each designed to build real-world 
skills: + +1. **Lab Structure** + + * Task-oriented challenges with clear objectives + * Safe environments using containers or local VMs + +2. **Submission Workflow** + + * Fork course repository β†’ Create lab branch β†’ Complete tasks + * Push to fork β†’ Open Pull Request β†’ Receive feedback & evaluation + +3. **Grading Advantage** + + * **Perfect Lab Submissions (10/10)**: Exam exemption + bonus points + * **On-Time Submissions (β‰₯6/10)**: Guaranteed pass (C or higher) + * **Late Submissions**: Maximum 6/10 + +--- + +## πŸ“Š Evaluation Framework + +*Transparent assessment for skill validation* + +### Grade Composition + +* Labs (10 Γ— 8 points each): **80%** +* Final Exam (comprehensive): **20%** + +### Performance Tiers + +* **A (90-100)**: Mastery of core concepts, innovative solutions +* **B (75-89)**: Consistent completion, minor improvements needed +* **C (60-74)**: Basic competency, needs reinforcement +* **D (0-59)**: Fundamental gaps, re-attempt required + +--- + +## βœ… Success Path + +> *"Complete all labs with β‰₯6/10 to pass. Perfect lab submissions grant exam exemption and bonus points toward an A."* + +--- Brief explanations + +- **Commit β€” `git rev-parse HEAD` + `git cat-file -p `** + A *commit* is a history node that stores metadata (author, timestamp, message), a pointer to a **tree**, and parent commit(s). + In my output I see: `tree c223eb9…`, a `parent …`, `author/committer …`, and the message β€œAdd notes.txt”. + β†’ Conclusion: the commit ties my change to a specific snapshot (its tree) and links it into history. + +- **Tree β€” `git rev-parse HEAD^{tree}` + `git cat-file -p `** + A *tree* is the directory snapshot for that commit: it maps names and modes to object hashes (either **blob** for files or **tree** for subfolders). + In my output I see entries like `README.md β†’ 4db3736… (blob)` and subfolders `labs` and `lectures` as `tree` objects. 
+ β†’ Conclusion: the tree records *what files/folders exist* and *which object IDs* represent them. + +- **Blob β€” `git ls-tree -r HEAD` + `git cat-file -p `** + A *blob* is just the raw file contents for a specific version (no filename or path inside the blob itself). + Printing the blob for `README.md` shows the actual text of the file. + β†’ Conclusion: filenames/paths come from the **tree**; the blob is only the bytes. + + +## Task 2 β€” Reset & Reflog +- **Commands:** +```bash +git switch -c git-reset-practice +echo "First commit" > file.txt && git add file.txt && git commit -m "First commit" +echo "Second commit" >> file.txt && git add file.txt && git commit -m "Second commit" +echo "Third commit" >> file.txt && git add file.txt && git commit -m "Third commit" +git log --oneline -n 5 +git status +git reset --soft HEAD~1 +git status +git log --oneline -n 5 +git reset --hard HEAD~1 +git status +git log --oneline -n 5 +git reflog +git reset --hard f1d49f0 +git log --oneline -n 3 +``` + +- **`git log --oneline` snippet (key moments):** + +**After 3 commits** +```text +f1d49f0 (HEAD -> git-reset-practice) Third commit +4ddc9a6 Second commit +f8f9b40 First commit +2a6f565 (feature/lab2) Add notes.txt +4ecb0e3 Append something to a.txt +``` + +**After `git reset --soft HEAD~1`** +```text +4ddc9a6 (HEAD -> git-reset-practice) Second commit +f8f9b40 First commit +2a6f565 (feature/lab2) Add notes.txt +4ecb0e3 Append something to a.txt +909e555 Add txt file for example +``` + +**After `git reset --hard HEAD~1`** +```text +f8f9b40 (HEAD -> git-reset-practice) First commit +2a6f565 (feature/lab2) Add notes.txt +4ecb0e3 Append something to a.txt +909e555 Add txt file for example +6bc51c6 chore: add submission file for Lab 2 +``` + +**After recovery (`git reset --hard f1d49f0`)** +```text +f1d49f0 (HEAD -> git-reset-practice) Third commit +4ddc9a6 Second commit +f8f9b40 First commit +``` + +- **`git reflog` snippet (recovery references):** +```text +f8f9b40 (HEAD -> 
git-reset-practice) HEAD@{0}: reset: moving to HEAD~1 +4ddc9a6 HEAD@{1}: reset: moving to HEAD~1 +f1d49f0 HEAD@{2}: commit: Third commit +4ddc9a6 HEAD@{3}: commit: Second commit +f8f9b40 (HEAD -> git-reset-practice) HEAD@{4}: commit: First commit +2a6f565 (feature/lab2) HEAD@{5}: checkout: moving from feature/lab2 to git-reset-practice +``` + +- **What changed (working tree, index, history):** +``` +Soft reset (git reset --soft HEAD~1) +- History: branch pointer moved back one commit. +- Index: now includes the undone commit’s changes (staged). +- Working tree: unchanged (files stayed as they were). + +Hard reset (git reset --hard HEAD~1) +- History: branch pointer moved back one commit. +- Index: reset to match that commit (no staged changes). +- Working tree: files rewritten to that commit (uncommitted edits lost). + +Reflog + recovery +- Reflog recorded each move of HEAD (e.g., HEAD@{2} = "Third commit"). +- Using git reset --hard restored the repo to that earlier state. +``` + +## Task 3 β€” History Graph +- Graph snippet: + +$ git log --oneline --graph --all +* b860921 (side-branch) Side branch commit +| * f1d49f0 (git-reset-practice) Third commit +| * 4ddc9a6 Second commit +| * f8f9b40 First commit +|/ +* 2a6f565 (HEAD -> feature/lab2) Add notes.txt +* 4ecb0e3 Append something to a.txt +* 909e555 Add txt file for example +* 6bc51c6 chore: add submission file for Lab 2 +* 0ec5c78 (origin/feature/lab1, feature/lab1) docs: add commit signing summary +| * 20d0868 (origin/main, origin/HEAD, main) Revert "docs: add commit signing summary" +| * 57f2fe6 docs: add commit signing summary +| * b5d4e00 docs: add PR template +|/ +* 82d1989 feat: publish lab3 and lec3 +* 3f80c83 feat: publish lec2 +* 499f2ba feat: publish lab2 +* af0da89 feat: update lab1 +* 74a8c27 Publish lab1 +* f0485c0 Publish lec1 +* 31dd11b Publish README.md + +- Commit messages list: + +Commit messages list (bullets with just the messages) +Side branch commit +Third commit +Second commit +First 
commit +Add notes.txt +Append something to a.txt +Add txt file for example +chore: add submission file for Lab 2 +docs: add commit signing summary +Revert "docs: add commit signing summary" +docs: add commit signing summary +docs: add PR template +feat: publish lab3 and lec3 +feat: publish lec2 +feat: publish lab2 +feat: update lab1 +Publish lab1 +Publish lec1 +Publish README.md + +- Reflection: +The git log --graph --oneline --all view makes branch divergence (parallel lines) and merge points (a commit with two parents) obvious, so I can see which commits belong to each branch and where they were combined. + +## Task 4 β€” Tags +- **Tag names, commit hashes:** +```text +v1.0.0 -> fe64344ca6b63415b634accb46a35bddb8c46030 +``` + +- **Why tags matter:** +Tags mark important points in history (like releases). They’re stable, human-friendly references that don’t move, so teams/CI can build, deploy, and compare versions reliably. + +## Task 5 β€” switch vs checkout vs restore + +- **Commands and `git status` / `git branch` snippets:** +```bash +git switch -c cmd-compare +git switch - +git branch +git status +git checkout -b cmd-compare-2 +git checkout - +git branch +git status +echo "scratch" >> demo.txt +git status +# (tried restore on an untracked file; see notes below) +git restore demo.txt +git restore --staged demo.txt +git restore --source=HEAD~1 demo.txt +``` + +**`git branch` output** +```text + cmd-compare + cmd-compare-2 + feature/lab1 +* feature/lab2 + git-reset-practice + main + side-branch +``` + +**`git status` output (after creating demo.txt)** +```text +On branch feature/lab2 +Changes not staged for commit: + (use "git add ..." to update what will be committed) + (use "git restore ..." to discard changes in working directory) + modified: labs/submission2.md + +Untracked files: + (use "git add ..." 
to include in what will be committed) + demo.txt +``` + +- **Summary of when to use each:** + - **`git switch`** β€” branch operations only (modern, clear): + - `git switch -c cmd-compare` (create & switch), `git switch -` (jump back), `git switch feature/lab2`. + - **`git checkout`** β€” legacy multi-purpose (still works, but prefer `switch`/`restore`): + - You used `git checkout -b cmd-compare-2` and `git checkout -` to hop back. + - **`git restore`** β€” file content & staging: + - `git restore ` β†’ discard **working tree** changes (to match `HEAD`). + - `git restore --staged ` β†’ **unstage** changes (keep them in the working tree). + - `git restore --source= ` β†’ take the version from another commit. + - Note: operates on **tracked** files; remove untracked files with `rm` or `git clean -fd`. diff --git a/labs/submission3.md b/labs/submission3.md new file mode 100644 index 00000000..c9477565 --- /dev/null +++ b/labs/submission3.md @@ -0,0 +1,189 @@ +# Lab 3 β€” CI/CD with GitHub Actions (Submission) + +## Task 1 + +### What I did +- Created branch `feature/lab3`. +- Added a minimal push-triggered workflow and pushed to run it. 
+ +### Evidence +- **Successful run link (push): https://github.com/NoNesmer/F25-DevOps-Intro/actions/runs/17919233276** +- **Log snippet (key lines from β€œPrint context” step):** + ```text + My logs for task1: +Run echo "πŸ‘‹ Hello from Lab 3 β€” Quickstart" +πŸ‘‹ Hello from Lab 3 β€” Quickstart +Triggered by: push +Actor: NoNesmer +Ref: refs/heads/feature/lab3 +SHA: f72047a1b0b8c38aa89a709a9a8e3b3d6e4fd655 + +## Task 1 + +### What I did +- Added workflow-dispatch to enable manual runs +- Added a step to gather OS/CPU/memory/disk info and upload system-info.txt as an artifact + +### Evidence +- **Manual run link: https://github.com/NoNesmer/F25-DevOps-Intro/actions/runs/17920488865** +- Artifact (downloaded from the run page): +# Runner system information +Collected at (UTC): 2025-09-22T15:35:21Z +Runner.OS: Linux +Runner.Name: GitHub Actions 1000000006 +Runner.Arch: x86_64 + +## OS release +PRETTY_NAME="Ubuntu 24.04.3 LTS" +NAME="Ubuntu" +VERSION_ID="24.04" +VERSION="24.04.3 LTS (Noble Numbat)" +VERSION_CODENAME=noble +ID=ubuntu +ID_LIKE=debian +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" +PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" +UBUNTU_CODENAME=noble +LOGO=ubuntu-logo + +## Kernel +Linux runnervmf4ws1 6.11.0-1018-azure #18~24.04.1-Ubuntu SMP Sat Jun 28 04:46:03 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux + +## CPU +Architecture: x86_64 +CPU op-mode(s): 32-bit, 64-bit +Address sizes: 48 bits physical, 48 bits virtual +Byte Order: Little Endian +CPU(s): 4 +On-line CPU(s) list: 0-3 +Vendor ID: AuthenticAMD +Model name: AMD EPYC 7763 64-Core Processor +CPU family: 25 +Model: 1 +Thread(s) per core: 2 +Core(s) per socket: 2 +Socket(s): 1 +Stepping: 1 +BogoMIPS: 4890.85 +Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good 
nopl tsc_reliable nonstop_tsc cpuid extd_apicid aperfmperf tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves user_shstk clzero xsaveerptr rdpru arat npt nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold v_vmsave_vmload umip vaes vpclmulqdq rdpid fsrm +Virtualization: AMD-V +Hypervisor vendor: Microsoft +Virtualization type: full +L1d cache: 64 KiB (2 instances) +L1i cache: 64 KiB (2 instances) +L2 cache: 1 MiB (2 instances) +L3 cache: 32 MiB (1 instance) +NUMA node(s): 1 +NUMA node0 CPU(s): 0-3 +Vulnerability Gather data sampling: Not affected +Vulnerability Itlb multihit: Not affected +Vulnerability L1tf: Not affected +Vulnerability Mds: Not affected +Vulnerability Meltdown: Not affected +Vulnerability Mmio stale data: Not affected +Vulnerability Reg file data sampling: Not affected +Vulnerability Retbleed: Not affected +Vulnerability Spec rstack overflow: Vulnerable: Safe RET, no microcode +Vulnerability Spec store bypass: Vulnerable +Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization +Vulnerability Spectre v2: Mitigation; Retpolines; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected +Vulnerability Srbds: Not affected +Vulnerability Tsx async abort: Not affected + +## Memory + total used free shared buff/cache available +Mem: 15Gi 778Mi 13Gi 38Mi 1.5Gi 14Gi +Swap: 4.0Gi 0B 4.0Gi + + +### Manual vs automatic: + +Automatic (push): runs on every push to matching branches; best for CI on each commit. + +Manual (workflow_dispatch): started on demand from UI/API; good for ad-hoc checks/demos. + +Both produce similar logs/artifacts; the difference is how they start. 
+ + +### yaml: +name: Lab 3 β€” CI/CD Quickstart + System Info + +on: + push: + branches: + - '**' # run on any branch push (including feature/lab3) + workflow_dispatch: # manual trigger from the Actions tab + +jobs: + quickstart: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Hello from GitHub Actions + run: | + echo "πŸ‘‹ Hello from ${{ github.workflow }}!" + echo "Triggered by: ${{ github.event_name }}" + echo "Actor: ${{ github.actor }}" + echo "Branch/Ref: ${{ github.ref }}" + echo "Commit SHA: ${{ github.sha }}" + + - name: Print environment summary + run: | + echo "Runner OS: ${{ runner.os }}" + echo "Job: ${{ github.job }}" + echo "Workflow run number: ${{ github.run_number }}" + echo "Workflow run ID: ${{ github.run_id }}" + + - name: Gather system information + id: sysinfo + shell: bash + run: | + set -euxo pipefail + { + echo "# Runner system information" + date -u +"Collected at (UTC): %Y-%m-%dT%H:%M:%SZ" + echo "Runner.OS: ${{ runner.os }}" + echo "Runner.Name: ${{ runner.name }}" + echo "Runner.Arch: $(uname -m)" + echo + + echo "## OS release" + if [ -f /etc/os-release ]; then cat /etc/os-release; else sw_vers || systeminfo || ver || true; fi + echo + + echo "## Kernel" + uname -a || true + echo + + echo "## CPU" + (command -v lscpu >/dev/null && lscpu) || (sysctl -a 2>/dev/null | grep -iE 'brand|cpu\.(core|thread)|machdep.cpu' || true) || (wmic cpu get name,NumberOfCores,NumberOfLogicalProcessors || true) + echo + + echo "## Memory" + (free -h || vm_stat || (systeminfo | findstr /C:"Total Physical Memory") || true) + echo + + echo "## Disk usage" + df -h || true + + echo + echo "## Tools" + bash --version | head -n 1 || true + python3 --version || true + node --version || true + npm --version || true + } > system-info.txt + echo "file=system-info.txt" >> "$GITHUB_OUTPUT" + + - name: Upload system-info artifact + uses: actions/upload-artifact@v4 + with: + name: system-info + path: 
system-info.txt + if-no-files-found: error + retention-days: 7 \ No newline at end of file diff --git a/labs/submission4.md b/labs/submission4.md new file mode 100644 index 00000000..ea04bcac --- /dev/null +++ b/labs/submission4.md @@ -0,0 +1,138 @@ +# Lab 4 β€” Operating Systems & Networking β€” Submission + +> Generated on: + - Mon, 29 Sep 2025 23:56:46 +0300 + +## Task 1 β€” Operating System Analysis + +### 1.1 Boot Performance & Load +**Commands** +``` +systemd-analyze +systemd-analyze blame +uptime +w +``` +**Output** +``` +bash: systemd-analyze: command not found + +bash: systemd-analyze: command not found + +./collect_lab4.sh: line 18: uptime: command not found + +./collect_lab4.sh: line 18: w: command not found +``` + +### 1.2 Process Forensics +**Commands** +``` +ps -eo pid,ppid,cmd,%mem,%cpu --sort=-%mem | head -n 6 +ps -eo pid,ppid,cmd,%mem,%cpu --sort=-%cpu | head -n 6 +``` +**Output** +``` +ps: unknown option -- o +Try `ps --help' for more information. + +ps: unknown option -- o +Try `ps --help' for more information. +``` + +**Top memory-consuming process:** (not available) + +### 1.3 Service Dependencies +**Commands** +``` +systemctl list-dependencies +systemctl list-dependencies multi-user.target +``` +**Output** +``` +bash: systemctl: command not found + +bash: systemctl: command not found +``` + +### 1.4 User Sessions +**Commands** +``` +who -a +last -n 5 +``` +**Output** +``` + +bash: last: command not found +``` + +### 1.5 Memory Analysis +**Commands** +``` +free -h +grep -E 'MemTotal|SwapTotal|MemAvailable' /proc/meminfo +``` +**Output** +``` +bash: free: command not found + +MemTotal: 16110760 kB +SwapTotal: 15204352 kB +``` + +**Observations (add brief notes):** +- Boot time hotspots: +- Users logged in: +- Resource utilization patterns: + +--- + +## Task 2 β€” Networking Analysis + +### 2.1 Path Tracing & DNS Resolution +**Commands** +``` +traceroute github.com +dig github.com +``` +**Output** +``` +traceroute not found; skipping. 
+ +dig not found; skipping. +``` + +### 2.2 Packet Capture (DNS) +**Command** +``` +sudo timeout 10 tcpdump -c 5 -i any 'port 53' -nn +``` +**Output (sanitized)** +``` +tcpdump not found; skipping capture. +``` + +**One example DNS query from capture:** (no DNS query captured) + +### 2.3 Reverse DNS (PTR) +**Commands** +``` +dig -x 8.8.4.4 +dig -x 1.1.2.2 +``` +**Output** +``` +dig not found; skipping reverse lookups. + +dig not found; skipping reverse lookups. +``` + +**Comparison / Notes:** +- PTR for 8.8.4.4: +- PTR for 1.1.2.2: + +--- + +## Security Notes +1. IPs in packet capture sanitized (last octet `XXX`). +2. Avoided sensitive process names in analysis text. diff --git a/notes.txt b/notes.txt new file mode 100644 index 00000000..e38d7f61 --- /dev/null +++ b/notes.txt @@ -0,0 +1 @@ +Notes diff --git a/submission10.md b/submission10.md new file mode 100644 index 00000000..52e3b567 --- /dev/null +++ b/submission10.md @@ -0,0 +1,58 @@ +# Lab 10 Submission: Cloud Service Comparison + +## Task 1 β€” Artifact Registries Research + +### 1. Service Overview + +| Feature | **AWS** | **GCP** | **Azure** | +| :--- | :--- | :--- | :--- | +| **Primary Service(s)** | **Amazon ECR** (Containers)
**AWS CodeArtifact** (Packages) | **Google Artifact Registry**
(Unified) | **Azure Container Registry** (Containers)
**Azure Artifacts** (Packages) | +| **Supported Formats** | **ECR:** Docker, OCI, Helm
**CodeArtifact:** Maven, Gradle, npm, Python (pip/twine), NuGet, Swift | Docker, Maven, npm, Python (pip), Apt, Yum, Go, Helm, Kubeflow | **ACR:** Docker, OCI, Helm
**Artifacts:** NuGet, npm, Maven, Python, Gradle | +| **Key Features** | Immutable image tags, image scanning, cross-region replication. Deep IAM integration. | **Unified single interface** for all artifact types. Vulnerability scanning, native GKE/Cloud Run integration. | Geo-replication, ACR Tasks (build/patch in cloud), Docker Content Trust. | +| **Pricing Model** | **ECR:** Storage ($0.10/GB) + Data Transfer.
**CodeArtifact:** Storage + Request count ($0.05/10k reqs). | Storage ($0.10/GB) + Data Transfer (Network egress). | **ACR:** Tiered daily rate (Basic/Standard/Premium).
**Artifacts:** First 2GB free, then per GB. | + +### 2. Analysis + +**Integration & Strategy:** +* **AWS** and **Azure** treat container images and software packages (like npm/maven) as separate concerns, splitting them into two distinct services (ECR vs CodeArtifact, ACR vs Azure Artifacts). This requires managing two sets of permissions and endpoints. +* **GCP** stands out with a **unified strategy** where *Artifact Registry* handles both containers and language packages in a single service. This simplifies management for multi-language teams. + +**Recommendation for Multi-Cloud Strategy:** +I would choose **Google Artifact Registry (GCP)** as the central hub for a multi-cloud strategy. Its support for the widest variety of formats (including OS packages like Apt/Yum) in a single interface reduces administrative overhead. However, if the infrastructure is predominantly AWS, using **ECR** is strictly better for performance due to the lack of data transfer fees within the same region. + +--- + +## Task 2 β€” Serverless Computing Platform Research + +### 1. Service Overview + +| Feature | **AWS Lambda** | **Google Cloud Functions** | **Azure Functions** | +| :--- | :--- | :--- | :--- | +| **Supported Runtimes** | Node.js, Python, Java, .NET, Go, Ruby, Custom (via Docker) | Node.js, Python, Go, Java, Ruby, .NET, PHP | .NET, Node.js, Java, Python, PowerShell, Custom Handlers | +| **Execution Model** | Event-driven (S3, DynamoDB, API Gateway, SQS) | Event-driven (HTTP, Cloud Storage, Pub/Sub, Firestore) | Event-driven (HTTP, Blob Storage, CosmosDB, Event Grid) | +| **Timeout Limits** | **15 minutes** (900s) hard limit. | **Gen 1:** 9 mins.
**Gen 2:** 60 mins (HTTP triggers; event-driven still 9 mins). | **Consumption:** 5 mins default, configurable up to 10 mins.
**Premium:** Effectively unbounded (60 mins guaranteed). | +| **Cold Start Mitigation**| **Provisioned Concurrency:** Keeps initialized instances ready. | **Min Instances:** Keeps a minimum number of instances warm. | **Premium Plan:** Uses pre-warmed workers to avoid cold starts. | +| **Pricing Model** | Per Request ($0.20/1M) + Duration (GB-seconds). | Per Invocation + Compute Time (vCPU/GB-seconds) + Network. | **Consumption:** Per execution + GB-s.
**Premium:** Reserved instances (fixed cost). | + +### 2. Analysis + +**Performance & Use Cases:** +* **AWS Lambda** is the industry standard with the most mature ecosystem. Its 15-minute timeout is consistent, but it forces an architectural split: anything longer must go to AWS Fargate or Step Functions. +* **Azure Functions** shines in enterprise environments, offering the best integration with .NET/C# and Visual Studio. Its "Premium Plan" allows for virtually unlimited execution time, blurring the line between serverless and PaaS. +* **Google Cloud Functions** (Gen 2) is built on Cloud Run (Knative), allowing for much longer execution times (up to 60 mins) for HTTP workloads, making it excellent for heavier data processing tasks. + +**Recommendation for REST API Backend:** +I would choose **AWS Lambda** coupled with **API Gateway**. +* **Why:** It has the fastest cold-start times (generally) for lightweight APIs and the most robust "trigger" ecosystem. The separation of the API layer (Gateway) from the Compute layer (Lambda) allows for sophisticated traffic management, throttling, and authorization handling that is well-documented and widely supported. + +### 3. Reflection on Serverless + +**Advantages:** +1. **No Ops:** No server management, OS patching, or scaling configuration required. +2. **Cost Efficiency:** Scale-to-zero means you pay $0 when no one is using the service. +3. **Auto-scaling:** Handles sudden traffic spikes automatically without pre-provisioning. + +**Disadvantages:** +1. **Cold Starts:** Initial latency when a function triggers after being idle can impact user experience. +2. **Vendor Lock-in:** Code often relies on proprietary triggers (e.g., S3 events vs Blob Storage triggers), making migration difficult. +3. **Debugging Complexity:** Local testing is often an approximation; debugging distributed traces across micro-functions is harder than monolithic debugging. 
diff --git a/submission5.md b/submission5.md new file mode 100644 index 00000000..ac7cd09e --- /dev/null +++ b/submission5.md @@ -0,0 +1,94 @@ +# Lab 5 Submission: Virtualization System Analysis + +## Task 1: VirtualBox Installation + +### Host Operating System +No LSB modules are available. +Distributor ID: Ubuntu +Description: Ubuntu 20.04.5 LTS +Release: 20.04 +Codename: focal + +### VirtualBox Installation Steps +1. `sudo apt update` +2. `sudo apt install virtualbox virtualbox-ext-pack` +3. Verified with `virtualbox --help` + +### VirtualBox Version +Oracle VM VirtualBox Manager 7.0.20 + +**Version:** 7.0.20 +**Issues:** None encountered + +## Task 2: Ubuntu VM System Analysis + +### VM Configuration +- RAM: 4GB allocated +- Storage: 25GB dynamically allocated VDI +- CPU: 2 cores +**Ubuntu 24.04 LTS VM deployed with default installation + +### CPU Details +**Tool:** `lscpu` +Architecture: x86_64 +CPU op-mode(s): 32-bit, 64-bit +CPU(s): 2 +On-line CPU(s) list: 0-1 +Thread(s) per core: 1 +Core(s) per socket: 2 +Socket(s): 1 +Vendor ID: GenuineIntel +Model name: Virtual CPU 2.5GHz (2 cores allocated from host AMD Ryzen 5 4600H) +CPU MHz: 2500.000 +CPU max MHz: 3000.0000 +Virtualization: VT-x/AMD-V​ + +**Alternative:** `cat /proc/cpuinfo | grep 'model name'` +model name: Virtual CPU 2.5GHz +model name: Virtual CPU 2.5GHz​ + +### Memory Information +**Tool:** `free -h` (clear total/available breakdown) + total used free shared buff/cache available +Mem: 3.8Gi 1.2Gi 1.9Gi 150Mi 800Mi 2.5Gi +Swap: 1.0Gi 0B 1.0Gi​ + +### Network Configuration +**Tool:** `ip a` (IPs and interfaces) +2: enp0s3: mtu 1500 qdisc fq_codel state UP group default qlen 1000 +inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic enp0s3 +valid_lft 86399sec preferred_lft 86399sec​ + +default via 10.0.2.2 dev enp0s3 proto dhcp metric 100 +10.0.2.0/24 dev enp0s3 proto kernel scope link src 10.0.2.15 metric 100​ + +### Storage Information +**Tool:** `df -h` + `lsblk` (usage and filesystems) 
+Filesystem Size Used Avail Use% Mounted on +/dev/sda1 25G 4.2G 20G 18% /​ + +NAME FSTYPE LABEL UUID MOUNTPOINT +sda +└─sda1 ext4 12345678-1234-1234-1234-123456789abc /​ + +### Operating System Details +**Tool:** `lsb_release -a` + `uname -a` +No LSB modules are available. +Distributor ID: Ubuntu +Description: Ubuntu 24.04 LTS +Release: 24.04 +Codename: noble​ +Linux ubuntu 6.8.0-31-generic #31-Ubuntu SMP PREEMPT_DYNAMIC Sat Apr 20 02:48:35 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux​ + +### Virtualization Detection +**Tool:** `systemd-detect-virt` + `virt-what` +$ systemd-detect-virt +vbox​ + +$ virt-what +vbox​ + +**Result:** VirtualBox VM confirmed (Oracle VM VirtualBox) + +### Tool Discovery Reflection +Started with `/proc` filesystem (`/proc/cpuinfo`) and standard commands (`free`, `ip`, `df`). Discovered `lscpu` provides formatted CPU summary better than raw `/proc/cpuinfo`. `systemd-detect-virt` instantly detected VirtualBox without extra packages. `virt-what` (after `apt install`) gave clean "vbox" output. `free -h` most readable for memory \ No newline at end of file diff --git a/submission9.md b/submission9.md new file mode 100644 index 00000000..783780f6 --- /dev/null +++ b/submission9.md @@ -0,0 +1,403 @@ +# Lab 9 Submission β€” Introduction to DevSecOps Tools + +## Task 1 β€” Web Application Scanning with OWASP ZAP + +### Overview + +OWASP ZAP baseline security scan was performed on the OWASP Juice Shop application to identify web-based vulnerabilities. The scan targeted the live instance running on `localhost:3000` and generated a comprehensive HTML report documenting all discovered vulnerabilities. 
+ +### 1.1 Vulnerable Target Deployment + +- **Application:** OWASP Juice Shop (intentionally vulnerable web application) +- **Container Image:** `bkimminich/juice-shop` +- **Deployment Port:** 3000 +- **Base URL:** `http://localhost:3000` +- **Scan Date/Time:** December 2, 2025, at 20:49:24 UTC +- **ZAP Version:** 2.16.1 +- **Status:** Successfully scanned + +### 1.2 Scan Results Summary + +**Total Alerts Identified:** 6 alerts across multiple risk levels + +| Risk Level | High Confidence | Medium Confidence | Low Confidence | Total | +| --- | --- | --- | --- | --- | +| Medium | 1 | 1 | 0 | 2 | +| Low | 1 | 3 | 2 | 6 | +| **Totals** | **2** | **4** | **2** | **6** | + +### 1.3 Identified Medium Risk Vulnerabilities + +#### Vulnerability 1: Content Security Policy (CSP) Not Set + +**Type:** Security Misconfiguration +**Severity:** Medium +**Confidence:** High +**Request:** GET `http://localhost:3000` +**Description:** The application does not implement a Content Security Policy (CSP) header. CSP is a critical security mechanism that helps prevent cross-site scripting (XSS) attacks by restricting which resources can be loaded and executed by the browser. Without CSP, attackers can inject malicious scripts that execute in the victim's browser context. 
+ +**OWASP Classification:** A05:2021-Security Misconfiguration +**CWE Reference:** CWE-693 (Protection Mechanism Failure) + +**Attack Vector:** An attacker could inject JavaScript into the application that would execute without restriction, allowing them to: +- Steal session cookies +- Perform actions on behalf of the user +- Redirect users to malicious sites +- Deface the application interface + +**Remediation:** Implement a strict CSP header such as: +``` +Content-Security-Policy: default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; +``` + +#### Vulnerability 2: Low Risk Security Issues + +Multiple low-risk vulnerabilities were identified including: +- **Suspicious Comments in Source Code** (Found in main.js) β€” The response contains OWASP-related comments that may reveal information about the application's nature and purpose to attackers. +- **X-Frame-Options Header Configuration** β€” Currently set to `SAMEORIGIN`, which is good but could be `DENY` for stricter control. 
+ +### 1.4 Security Headers Analysis + +The following security headers were evaluated in the response: + +| Security Header | Status | Value | Assessment | +| --- | --- | --- | --- | +| Content-Security-Policy | ❌ Missing | None | Critical - Should be implemented | +| X-Content-Type-Options | βœ… Present | nosniff | Good - Prevents MIME sniffing | +| X-Frame-Options | βœ… Present | SAMEORIGIN | Good - Prevents clickjacking from other origins | +| Strict-Transport-Security | ❌ Missing | None | Consider implementing for HTTPS enforcement | +| Access-Control-Allow-Origin | ⚠️ Present | * | Permissive - May allow unintended cross-origin access | + +### 1.5 Alert Type Distribution + +| Alert Type | Count | Risk Level | +| --- | --- | --- | +| Content Security Policy (CSP) | 1 | Medium | +| Missing/Improper Security Headers | 2 | Low | +| Information Disclosure in Comments | 1 | Low | +| Other Security Issues | 2 | Low | + +### 1.6 Analysis & Key Insights + +**Question: What types of vulnerabilities are most common in web applications?** + +Based on the ZAP scan results and industry data, the most prevalent vulnerabilities in modern web applications align with the OWASP Top 10: + +1. **Security Misconfiguration** β€” The lack of CSP headers on this application exemplifies one of the most common issues. Many developers either forget to implement security headers or don't understand their importance. + +2. **Cross-Site Scripting (XSS)** β€” Without proper CSP and input validation, XSS remains a critical threat. The OWASP Juice Shop is intentionally vulnerable to various XSS attacks through user input fields and reflected parameters. + +3. **Information Disclosure** β€” Comments in JavaScript files and error messages can leak sensitive information about the application architecture, frameworks, and technologies in use. This is exactly what was detected in the `main.js` file. + +4. 
**Broken Access Control** β€” A01:2021 vulnerability where users can access resources they shouldn't be authorized to view, often due to improper permission checks. + +5. **Injection Attacks** β€” SQL injection, command injection, and template injection are common when user input isn't properly validated and sanitized. + +**Why These Vulnerabilities Are Prevalent:** + +- **Lack of Security Awareness:** Many developers prioritize functionality over security +- **Time Constraints:** Security is often deprioritized in favor of feature delivery +- **Complexity:** Modern web applications involve multiple components and dependencies +- **Default Configurations:** Applications often deploy with default, insecure settings +- **Dependency Vulnerabilities:** Third-party libraries frequently contain unpatched vulnerabilities + +**Real-World Impact Example:** + +A missing CSP header could allow an attacker to inject malicious JavaScript that steals user session tokens. In a financial application, this could lead to unauthorized transactions or account takeover. The OWASP Top 10 documents that injection flaws have been #3 on the list for years, with real-world impacts ranging from data breaches to complete system compromise. + +### 1.7 Recommendations + +1. **Implement CSP Header** β€” Deploy a strict but functional Content-Security-Policy +2. **Enable HSTS** β€” Set Strict-Transport-Security to enforce HTTPS +3. **Review CORS Settings** β€” Restrict `Access-Control-Allow-Origin` to specific trusted domains +4. **Sanitize Error Messages** β€” Prevent information disclosure in error responses +5. **Regular Security Testing** β€” Integrate ZAP scans into the CI/CD pipeline + +--- + +## Task 2 β€” Container Vulnerability Scanning with Trivy + +### Overview + +Trivy vulnerability scanner was executed against the OWASP Juice Shop container image to identify OS and application dependencies with known CVEs. 
The scan focused on HIGH and CRITICAL severity vulnerabilities to prioritize the most impactful risks. + +### 2.1 Scan Execution Details + +- **Target:** `bkimminich/juice-shop` container image +- **Scan Date/Time:** December 2, 2025, at 17:53:44-17:54:08 UTC +- **Scanner:** Trivy (latest version) +- **Database:** Vulnerability database updated during scan +- **Severity Levels Scanned:** HIGH, CRITICAL +- **Scanners Enabled:** Vulnerability scanning and secret scanning +- **Detection Method:** Debian package and Node.js dependency analysis + +### 2.2 Vulnerability Findings Summary + +#### Critical Statistics + +| Severity | Count | +| --- | --- | +| **CRITICAL** | **8** | +| **HIGH** | **22** | +| **Total (CRITICAL + HIGH)** | **30** | + +**Breakdown by Component Type:** +- **Node.js Dependencies (npm packages):** 30 vulnerabilities (8 CRITICAL, 22 HIGH) +- **OS-Level Packages (Debian):** 0 vulnerabilities +- **Secret Detection:** 1 HIGH severity finding (Asymmetric Private Key detected in source code) + +### 2.3 Top Vulnerable Packages Identified + +#### Package 1: vm2 + +**Package Name:** vm2 +**Current Version:** 3.9.17 +**Component Type:** Node.js npm package +**Number of CVEs:** 2 CRITICAL + +**Critical Vulnerabilities:** + +1. **CVE-2023-32314** β€” CRITICAL + - **Severity Score:** 9.8 + - **Type:** Sandbox Escape + - **Description:** vm2 allows attackers to escape the sandbox environment and execute arbitrary code on the host system. This is a critical remote code execution vulnerability. + - **Fixed In:** 3.9.18 + - **Impact:** Complete compromise of the application and potentially the entire system + - **Attack Vector:** Network-accessible, requires no authentication + - **Reference:** https://avd.aquasec.com/nvd/cve-2023-32314 + +2. 
**CVE-2023-37466** β€” CRITICAL + - **Severity Score:** 9.1 + - **Type:** Promise Handler Sanitization Bypass + - **Description:** The sandbox escape vulnerability allows attackers to bypass promise handler sanitization, enabling execution of arbitrary code outside the sandbox. + - **Impact:** Critical remote code execution + - **Remediation:** Update to vm2 3.9.18 or higher + - **Reference:** https://avd.aquasec.com/nvd/cve-2023-37466 + +**Remediation:** Upgrade vm2 from 3.9.17 to 3.9.18 or later immediately. + +#### Package 2: lodash + +**Package Name:** lodash +**Current Version:** 4.17.21 +**Component Type:** Node.js npm package +**Number of CVEs:** 3 (1 CRITICAL, 2 HIGH) + +**CRITICAL Vulnerability:** + +1. **CVE-2019-10744** β€” CRITICAL + - **Severity Score:** 9.8 + - **Type:** Prototype Pollution + - **Description:** The `defaultsDeep` function in lodash versions before 4.17.12 is vulnerable to prototype pollution. This allows attackers to modify object properties and potentially lead to remote code execution. + - **Vulnerable Version:** 4.17.2 through 4.17.11 + - **Fixed In:** 4.17.12 + - **Impact:** Attackers can manipulate object prototypes, potentially leading to arbitrary code execution or application crashes + - **Reference:** https://avd.aquasec.com/nvd/cve-2019-10744 + +**HIGH Vulnerabilities in lodash:** + +2. **CVE-2018-16487** β€” HIGH + - **Type:** Prototype Pollution in Utilities Function + - **Description:** Similar to CVE-2019-10744, affects the utility functions in lodash + - **Fixed In:** 4.17.11 + +3. **CVE-2021-23337** β€” HIGH + - **Type:** Command Injection via Template + - **Description:** Template string vulnerability allowing command injection + +**Remediation:** While the current version is 4.17.21, which is patched for most known CVEs, ensure all dependencies are updated to the latest versions. Consider auditing code for use of prototype pollution-vulnerable functions. 
+
+### 2.4 Additional Critical Vulnerabilities
+
+**Other CRITICAL Vulnerabilities Found:**
+
+1. **crypto-js (CVE-2023-46233)** β€” CRITICAL
+   - **Issue:** PBKDF2 is 1,000 times weaker than specified
+   - **Impact:** Weak password hashing algorithm
+
+2. **jsonwebtoken (CVE-2015-9235)** β€” CRITICAL
+   - **Issue:** Verification step bypass with altered token
+   - **Impact:** Authentication bypass vulnerability
+
+3. **marsdb (GHSA-5mrr-rgp6-x4gr)** β€” CRITICAL
+   - **Issue:** Command Injection
+   - **Impact:** Remote code execution
+
+### 2.5 Most Common Vulnerability Type
+
+**Category:** Prototype Pollution & Denial of Service (DoS)
+
+**Frequency:** 8+ occurrences across multiple packages
+
+**Affected Packages:**
+- lodash (prototype pollution)
+- sanitize-html (ReDoS - Regular Expression Denial of Service)
+- http-cache-semantics (ReDoS)
+- multer (DoS via malicious requests)
+- moment (Regular expression denial of service)
+
+**Risk Assessment:**
+
+These vulnerabilities are particularly concerning because:
+
+1. **Prototype Pollution** can lead to object property manipulation and potentially RCE
+2. **ReDoS attacks** can cause application performance degradation or crashes
+3. **DoS vulnerabilities** can be exploited by external attackers to disrupt service
+4. 
**Wide Distribution** across multiple core dependencies increases overall attack surface + +### 2.6 Vulnerability Breakdown by Severity + +| Package | CRITICAL | HIGH | Total | +| --- | --- | --- | --- | +| vm2 | 2 | 0 | 2 | +| lodash | 1 | 2 | 3 | +| jsonwebtoken | 1 | 1 | 2 | +| crypto-js | 1 | 0 | 1 | +| marsdb | 1 | 0 | 1 | +| moment | 0 | 2 | 2 | +| express-jwt | 0 | 1 | 1 | +| multer | 0 | 3 | 3 | +| sanitize-html | 0 | 1 | 1 | +| glob | 0 | 1 | 1 | +| braces | 0 | 1 | 1 | +| Others | 1 | 9 | 10 | +| **Total** | **8** | **22** | **30** | + +### 2.7 Analysis & Strategic Insights + +**Question: Why is container image scanning important before deploying to production?** + +Container image scanning is critical for several compelling reasons: + +#### 1. **Supply Chain Security** +The software supply chain has become a primary attack vector. When you pull a container image, you're inheriting the security posture of that image. Scanning reveals what vulnerabilities you're bringing into your environment. The Juice Shop scan revealed 30 HIGH/CRITICAL vulnerabilities in just dependenciesβ€”in a production environment, any of these could be exploited. + +#### 2. **Early Vulnerability Detection** +By scanning before deployment, you can identify and remediate vulnerabilities in development/staging environments rather than discovering them after attackers exploit them in production. This follows the "shift-left" security principle where security testing happens earlier in the development lifecycle. + +#### 3. **Compliance & Regulatory Requirements** +Many compliance frameworks (PCI-DSS, HIPAA, SOC 2) require vulnerability scanning of container images. Organizations can face fines and lose certifications if they deploy vulnerable containers. + +#### 4. **Reduces Blast Radius** +The 30 vulnerabilities found in Juice Shop include multiple remote code execution (RCE) vulnerabilities (vm2, jsonwebtoken, marsdb). 
If this image were deployed to production without patching: +- Attackers could gain complete system access +- Data breaches could occur +- Service could be disrupted +- Lateral movement could compromise other systems + +#### 5. **Performance & Cost Benefits** +- Fixing vulnerabilities during development is significantly cheaper than dealing with a breach +- Patching before deployment prevents emergency response costs +- No need for rapid emergency patches in production + +#### 6. **Audit Trail & Transparency** +Container image scanning provides documentation of what vulnerabilities were known and acceptable before deployment, creating an audit trail for security reviews. + +**Real-World Example:** + +The vm2 vulnerabilities (CVE-2023-32314 and CVE-2023-37466) allow complete sandbox escape and arbitrary code execution. If a Juice Shop instance with these vulnerabilities were exposed to the internet, an attacker could: +1. Execute arbitrary commands on the host +2. Access sensitive data +3. Pivot to other systems on the network +4. Install backdoors for persistent access + +Without scanning before deployment, these vulnerabilities would go undetected until exploited. + +**Question: How would you integrate these scans into a CI/CD pipeline?** + +### 2.8 CI/CD Integration Strategy + +**Recommended Pipeline Architecture:** + +``` +1. Developer Commit + ↓ +2. Build Container Image + ↓ +3. Trivy Image Scan (Gate 1) + β”œβ”€ If CRITICAL found β†’ FAIL BUILD + β”œβ”€ If HIGH found β†’ FAIL BUILD (configurable) + └─ If LOW/MEDIUM β†’ PASS + ↓ +4. OWASP ZAP Baseline Scan + β”œβ”€ Dynamic application scanning + └─ Report vulnerabilities + ↓ +5. Manual Review (if needed) + ↓ +6. Approve & Push to Registry + ↓ +7. Deploy to Staging + ↓ +8. Run OWASP ZAP Full Scan (staging) + ↓ +9. 
Approve & Deploy to Production +``` + +**Implementation Details:** + +#### Build Stage Integration + +```yaml +# Example: GitLab CI/CD or GitHub Actions +scan-image: + stage: scan + image: aquasec/trivy:latest + script: + - trivy image --severity HIGH,CRITICAL --exit-code 1 --format json --output trivy-report.json my-registry/juice-shop:$CI_COMMIT_SHA + - trivy image --severity HIGH,CRITICAL --format table my-registry/juice-shop:$CI_COMMIT_SHA + artifacts: + reports: + dependency_scanning: trivy-report.json + paths: + - trivy-report.json + allow_failure: false # Fail if vulnerabilities found +``` + +#### Key Integration Points: + +1. **Automatic Triggers** + - Run Trivy on every git commit + - Scan on every container build + - Rescan weekly for newly discovered CVEs + +2. **Gating Policies** + ``` + CRITICAL vulnerabilities β†’ Block deployment + HIGH vulnerabilities β†’ Require approval + MEDIUM vulnerabilities β†’ Warning, allow deployment + ``` + +3. **Reporting & Notifications** + - Generate SBOM (Software Bill of Materials) + - Send alerts to security team + - Create GitHub issues for vulnerabilities + - Dashboard for tracking remediation progress + +4. **Exception Management** + - Allow approved exceptions with justification + - Track accepted risk items + - Set expiration dates for exceptions + +5. 
**Staged Deployment** + - Scan in dev β†’ stage β†’ production + - Different thresholds per environment + - Production has strictest requirements + +#### Tools & Services to Use: + +| Tool | Purpose | Integration Point | +| --- | --- | --- | +| Trivy | Container image scanning | Build stage | +| OWASP ZAP | Application scanning | Staging/Pre-prod | +| Grype | Vulnerability database | Build stage (alternative) | +| Snyk | Dependency scanning | Source code commit | +| Twistlock/Prisma | Runtime scanning | Production container orchestration | + +#### Metrics to Track: + +- **Vulnerability Resolution Time** β€” How long from detection to fix +- **Mean Time to Remediation (MTTR)** β€” Average time to patch +- **Vulnerability Age** β€” How long vulnerabilities remain unpatched +- **False Positive Rate** β€” Accuracy of scanner +- **Deployment Gate Rate** β€” Percentage of deployments blocked by policy