diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..63b9f8aa --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,17 @@ +## Goal +Briefly describe the purpose of this pull request. What problem does it solve? + +## Changes +- List the main changes made in this PR. +- Keep it short and focused. + +## Testing +Explain how you tested your changes: +- Manual tests +- Automated tests +- Other validation steps + +### Checklist +- [ ] PR has a clear and descriptive title +- [ ] Documentation/README updated if necessary +- [ ] No secrets or large temporary files included diff --git a/.github/workflows/github-actions-demo.yml b/.github/workflows/github-actions-demo.yml new file mode 100644 index 00000000..5f19c141 --- /dev/null +++ b/.github/workflows/github-actions-demo.yml @@ -0,0 +1,31 @@ +name: GitHub Actions Demo +run-name: ${{ github.actor }} is testing out GitHub Actions 🚀 + +on: + push: + workflow_dispatch: + +jobs: + Explore-GitHub-Actions: + runs-on: ubuntu-latest + steps: + - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event." + - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" + - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." + - name: Check out repository code + uses: actions/checkout@v5 + - run: echo "💡 The ${{ github.repository }} repository has been cloned to the runner." + - run: echo "🖥️ The workflow is now ready to test your code on the runner." + - name: List files in the repository + run: | + ls ${{ github.workspace }} + - run: echo "🍏 This job's status is ${{ job.status }}." 
+ + - name: CPU info + run: lscpu + - name: Memory info + run: free -h + - name: Disk info + run: df -h + - name: Environment variables + run: env | sort diff --git a/labs/lab7/current-state.txt b/labs/lab7/current-state.txt new file mode 100644 index 00000000..345c3ef0 --- /dev/null +++ b/labs/lab7/current-state.txt @@ -0,0 +1,3 @@ +version: 1.0 +app: myapp +replicas: 3 diff --git a/labs/lab7/desired-state.txt b/labs/lab7/desired-state.txt new file mode 100644 index 00000000..345c3ef0 --- /dev/null +++ b/labs/lab7/desired-state.txt @@ -0,0 +1,3 @@ +version: 1.0 +app: myapp +replicas: 3 diff --git a/labs/lab7/health.log b/labs/lab7/health.log new file mode 100644 index 00000000..3a6f4d75 --- /dev/null +++ b/labs/lab7/health.log @@ -0,0 +1,17 @@ +Sat Oct 18 07:45:19 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:45:54 PM MSK 2025 - ❌ CRITICAL: State mismatch detected! + Desired MD5: a15a1a4f965ecd8f9e23a33a6b543155 + Current MD5: 48168ff3ab5ffc0214e81c7e2ee356f5 +Sat Oct 18 07:46:12 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:03 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:06 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:09 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:12 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:15 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:18 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:21 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:24 PM MSK 2025 - ❌ CRITICAL: State mismatch detected! 
+ Desired MD5: a15a1a4f965ecd8f9e23a33a6b543155 + Current MD5: 86c1e4f2cba0e303f72049ccbb3141bf +Sat Oct 18 07:47:27 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:30 PM MSK 2025 - ✅ OK: States synchronized diff --git a/labs/lab7/healthcheck.sh b/labs/lab7/healthcheck.sh new file mode 100755 index 00000000..d088397f --- /dev/null +++ b/labs/lab7/healthcheck.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# healthcheck.sh - Monitor GitOps sync health + +DESIRED_MD5=$(md5sum desired-state.txt | awk '{print $1}') +CURRENT_MD5=$(md5sum current-state.txt | awk '{print $1}') + +if [ "$DESIRED_MD5" != "$CURRENT_MD5" ]; then + echo "$(date) - ❌ CRITICAL: State mismatch detected!" | tee -a health.log + echo " Desired MD5: $DESIRED_MD5" | tee -a health.log + echo " Current MD5: $CURRENT_MD5" | tee -a health.log +else + echo "$(date) - ✅ OK: States synchronized" | tee -a health.log +fi + diff --git a/labs/lab7/monitor.sh b/labs/lab7/monitor.sh new file mode 100755 index 00000000..721b16ca --- /dev/null +++ b/labs/lab7/monitor.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# monitor.sh - Combined reconciliation and health monitoring + +echo "Starting GitOps monitoring..." +for i in {1..10}; do + echo -e "\n--- Check #$i ---" + ./healthcheck.sh + ./reconcile.sh + sleep 3 +done + diff --git a/labs/lab7/reconcile.sh b/labs/lab7/reconcile.sh new file mode 100755 index 00000000..022bc936 --- /dev/null +++ b/labs/lab7/reconcile.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# reconcile.sh - GitOps reconciliation loop + +DESIRED=$(cat desired-state.txt) +CURRENT=$(cat current-state.txt) + +if [ "$DESIRED" != "$CURRENT" ]; then + echo "$(date) - ⚠️ DRIFT DETECTED!" + echo "Reconciling current state with desired state..." 
+ cp desired-state.txt current-state.txt + echo "$(date) - ✅ Reconciliation complete" +else + echo "$(date) - ✅ States synchronized" +fi diff --git a/labs/submission1.md b/labs/submission1.md new file mode 100644 index 00000000..d3293200 --- /dev/null +++ b/labs/submission1.md @@ -0,0 +1,9 @@ +Signing commits (with GPG, SSH, or S/MIME) helps to: + +1. **Prove authorship** – ensures the commit truly comes from the person who owns the signing key. + +2. **Protect integrity** – prevents undetected tampering with commit history, as the signature is cryptographically verifiable. + +3. **Build trust** – signed commits are marked as Verified on GitHub, making code review and collaboration more secure. + +4. **Support auditing and compliance** – even if a key is rotated or revoked later, the verification record persists, ensuring stable contribution history. \ No newline at end of file diff --git a/labs/submission10.md b/labs/submission10.md new file mode 100644 index 00000000..fae92802 --- /dev/null +++ b/labs/submission10.md @@ -0,0 +1,175 @@ +# Lab 10 — Cloud Computing Fundamentals + +## Task 1 — Artifact Registries Research + +### AWS +- **Service name:** AWS CodeArtifact (for packages) and Amazon Elastic Container Registry (ECR) (for container images) +- **Supported artifact types:** + - **CodeArtifact:** npm, Maven (Java), PyPI (Python), NuGet (.NET) packages + - **ECR:** Docker/OCI container images +- **Key features:** + - Fully managed repositories for both packages and container images + - Secure, scalable storage with IAM-based access control + - Integrated vulnerability scanning (ECR) + - Deep integration with AWS CI/CD services (CodeBuild, CodePipeline, ECS, EKS, Fargate) +- **Integration with other cloud services:** + - Integrates with CI/CD tools like AWS CodeBuild and AWS CodePipeline for CodeArtifact. + - Works with ECS, EKS, Fargate for container image deployments via ECR. 
+- **Pricing model:** Pay for storage, data transfer, and number of requests +- **Common use cases:** + - CodeArtifact for managing build/package dependencies + - ECR for hosting and deploying container images + +### GCP +- **Service name:** Google Cloud Artifact Registry +- **Supported artifact types:** Container images (Docker/OCI), npm, Maven, Python, Debian/RPM packages +- **Key features:** + - Unified service for containers and packages + - Region-specific repositories for low latency + - Fine-grained IAM controls and integrated security scanning + - Integration with Cloud Build, GKE, Cloud Run, and CI/CD pipelines +- **Integration with other cloud services:** + - Works with Google Cloud Build for storing build artifacts and then deploying to GKE, Cloud Run, Compute Engine, etc. +- **Pricing model:** Based on storage and network egress; regional repositories can reduce egress costs +- **Common use cases:** + - Centralized repository for multi-artifact builds + - Integration with GCP DevOps pipelines and CI/CD tools + +### Azure +- **Service name:** Azure Artifacts (for packages) and Azure Container Registry (ACR) (for containers) +- **Supported artifact types:** + - **Azure Artifacts:** npm, Maven, NuGet, Python, Cargo, Universal packages + - **ACR:** Docker/OCI container images and Helm charts +- **Key features:** + - Integrated with Azure DevOps and GitHub Actions + - Geo-replication and private networking for ACR + - Artifact caching and upstream sources + - RBAC and private link integration for security +- **Integration with other cloud services:** + - Integrates with Azure DevOps pipelines, Azure Kubernetes Service (AKS), Azure App Service, machine-learning pipelines. 
+- **Pricing model:** Tier-based (Basic, Standard, Premium) for ACR; pay for storage and operations +- **Common use cases:** + - Managing internal package feeds for enterprises + - Hosting and distributing container images across regions + +### Comparison Table + +| Provider | Registry Service(s) | Artifact Types | Key Differentiators | +|-----------|---------------------|----------------|---------------------| +| **AWS** | CodeArtifact (packages), ECR (containers) | npm, Maven, PyPI, NuGet, Docker/OCI | Strong AWS integration, separate services for packages and images | +| **GCP** | Artifact Registry | Containers + language + OS packages | Unified multi-artifact support, region-specific design | +| **Azure** | Azure Artifacts, Azure Container Registry | npm, Maven, NuGet, Python, Docker/OCI | Deep Azure DevOps integration, geo-replication, caching | + +### Similarities + +All three cloud providers offer **fully managed artifact registry** services that: + +- Support **Docker/OCI images** and **common language package formats** (npm, Maven, PyPI, etc.) +- Provide **fine-grained access control** and **security scanning** +- Integrate tightly with their **own CI/CD ecosystems** (AWS CodePipeline, GCP Cloud Build, Azure DevOps) +- Use a **pay-as-you-go pricing model** based on storage and network usage + +### Analysis — Best Choice for Multi-Cloud Strategy +For a **multi-cloud** setup, **GCP Artifact Registry** is the most versatile choice because it supports multiple artifact types under one service, offers fine-grained IAM, and provides regional optimization. +However, for optimal performance and cost, using **each cloud’s native registry** for workloads deployed within that cloud (e.g., ECR for AWS, ACR for Azure) is often better. +A hybrid model with standardized naming and cross-cloud pipelines provides flexibility while reducing vendor lock-in. 
+ +--- + +## Task 2 — Serverless Computing Platform Research + +### AWS +- **Service name:** AWS Lambda +- **Supported languages/runtimes:** Node.js, Python, Java, Go, .NET, Ruby, PowerShell, custom runtimes +- **Execution model:** Event-driven or HTTP-triggered via API Gateway +- **Key features and capabilities:** + - Fully managed serverless compute platform + - Automatic horizontal scaling (per-request) + - Integration with over 200 AWS services via triggers and events + - Provisioned Concurrency option for predictable cold start times +- **Cold start:** Low for interpreted languages (Python, Node.js), higher for compiled (Java, .NET) +- **Integration:** Deep integration with AWS ecosystem (S3, DynamoDB, SNS/SQS, EventBridge, etc.) +- **Pricing model:** Pay per invocation + execution time (GB-seconds); free tier available +- **Execution limits:** Up to 15 minutes per function invocation +- **Common use cases:** API backends, event-driven automation, microservices, IoT + +### GCP +- **Service name:** Google Cloud Functions (2nd Gen) / Cloud Run Functions +- **Supported languages/runtimes:** Node.js, Python, Go, Java, .NET, Ruby, PHP +- **Execution model:** Event-driven via Eventarc or HTTP-based; containerized runtime on Cloud Run +- **Key features and capabilities:** + - Based on Cloud Run (containerized serverless model) + - Supports both HTTP and event-based triggers + - Concurrency and minimum instances configurable (reduces cold starts) + - Integrated with Cloud Build, Pub/Sub, Firestore, AI/ML APIs +- **Cold start:** Tunable with minimum instances and concurrency settings +- **Integration:** Works with Cloud Storage, Pub/Sub, Firestore, AI/ML, and other GCP services +- **Pricing model:** Based on vCPU-seconds, GiB-seconds, and request count; free tier available +- **Execution limits:** Up to 60 minutes for HTTP requests +- **Common use cases:** REST APIs, event-driven data pipelines, CI/CD automation + +### Azure +- **Service name:** Azure Functions +- 
**Supported languages/runtimes:** C#, F#, JavaScript/TypeScript, Python, Java, PowerShell +- **Execution model:** Event-driven or HTTP-triggered; available in Consumption, Premium, or Dedicated plans +- **Key features and capabilities:** + - Deep integration with Azure and Microsoft ecosystem (Logic Apps, Event Grid, Cosmos DB) + - Supports Durable Functions for long-running workflows + - Multiple hosting plans for different performance and cost needs + - Built-in monitoring with Application Insights +- **Cold start:** Reduced in Premium plan via pre-warmed instances +- **Integration:** Tight integration with Azure ecosystem (Event Hub, Logic Apps, Cosmos DB) +- **Pricing model:** Pay-per-execution in Consumption plan; Premium plan adds VNET and scaling control +- **Execution limits:** Up to 5 minutes (Consumption), longer in Premium plan +- **Common use cases:** Enterprise APIs, workflow automation, hybrid integration + +### Comparison Table + +| Provider | Serverless Service(s) | Supported Languages | Key Differentiators | +| --------- | ------------------------------------- | --------------------------------------------------- | -------------------------------------------------------------------------------- | +| **AWS** | AWS Lambda | Node.js, Python, Java, Go, .NET, Ruby | Mature ecosystem, deep AWS integrations, wide event support | +| **GCP** | Cloud Functions / Cloud Run Functions | Node.js, Python, Go, Java, .NET, PHP | Unified HTTP and event model, container-based runtime, tunable cold starts | +| **Azure** | Azure Functions | C#, JavaScript/TypeScript, Python, Java, PowerShell | Tight Microsoft ecosystem integration, durable workflows, flexible hosting plans | + + +### Similarities + +All three serverless platforms: +- Provide **fully managed, event-driven compute** with **auto-scaling** +- Support **HTTP-triggered** and **background (event-based)** execution +- Offer **multi-language runtime environments** +- Use a **pay-as-you-go pricing model** (based 
on invocations or compute time) +- Include **free tiers** and integrate with **their native cloud services** for CI/CD and monitoring + +### Analysis — Best Platform for REST API Backend +For REST APIs, **AWS Lambda** is ideal due to its strong integration with API Gateway, scalability, and ecosystem maturity. +However, **GCP Cloud Run Functions** offer more flexibility for HTTP-based APIs, better concurrency handling, and lower cold starts. +For .NET-heavy or enterprise setups, **Azure Functions** is the natural fit due to seamless integration with Azure and hybrid deployment options. + +### Reflection — Advantages and Disadvantages of Serverless Computing + +**Advantages:** +- No server provisioning or maintenance required +- Automatic scaling and high availability +- Pay-per-use model improves cost efficiency +- Rapid deployment and integration with other cloud services + +**Disadvantages:** +- Cold start latency can impact performance +- Execution time limits restrict long-running tasks +- Vendor lock-in and limited portability +- Complex debugging and observability +- Potentially higher costs at sustained high load + +--- + +## Summary + +- **Artifact Registries:** + - AWS, GCP, and Azure each provide strong, secure artifact management tools. + - GCP Artifact Registry offers the most unified multi-artifact support. +- **Serverless Platforms:** + - All three providers offer mature FaaS solutions. + - Choice depends on ecosystem, performance needs, and developer expertise. +- **Overall Insight:** + - Use native services when staying within one cloud, but for multi-cloud setups, standardize build pipelines and artifact formats for portability. diff --git a/labs/submission11.md b/labs/submission11.md new file mode 100644 index 00000000..282494e7 --- /dev/null +++ b/labs/submission11.md @@ -0,0 +1,212 @@ +# Lab 11 — Decentralized Web Hosting with IPFS & 4EVERLAND + +## Task 1 — Local IPFS Node Setup and File Publishing + +### 1. 
IPFS Node Peer Count + +After starting the IPFS Docker container and opening the Web UI (`http://127.0.0.1:5001/webui/`), the node successfully connected to the IPFS network. + +**Peer count observed:** +**603 peers** + +![Peers count](https://github.com/user-attachments/assets/ccd75f1e-b2c8-4f37-a96b-aabea34eeb74) + +--- + +### 2. Network Bandwidth Statistics + +The Web UI dashboard shows healthy incoming and outgoing traffic, indicating active participation in the IPFS swarm. + +![Network bandwidth statistics](https://github.com/user-attachments/assets/d06d4755-b76f-47f0-bf40-c3a5a3fea701) + +--- + +### 3. Test File CID + +A test file was created and added to IPFS using: + +```bash +echo "Hello IPFS Lab" > testfile.txt +docker cp testfile.txt ipfs_node:/export/ +docker exec ipfs_node ipfs add /export/testfile.txt +``` + +**Generated CID:** + +``` +QmUFJmQRosK4Amzcjwbip8kV3gkJ8jqCURjCNxuv3bWYS1 +``` + +--- + +### 4. Local Gateway Access + +The file was successfully accessed through the local IPFS gateway: + +``` +http://localhost:8080/ipfs/QmUFJmQRosK4Amzcjwbip8kV3gkJ8jqCURjCNxuv3bWYS1 +``` + +![local gateway ui](https://github.com/user-attachments/assets/dde87819-86aa-48f5-8ead-5eef31e5b7dd) + +--- + +### 5. Public Gateway Access + +The file is available through public IPFS gateways: + +* [https://ipfs.io/ipfs/QmUFJmQRosK4Amzcjwbip8kV3gkJ8jqCURjCNxuv3bWYS1](https://ipfs.io/ipfs/QmUFJmQRosK4Amzcjwbip8kV3gkJ8jqCURjCNxuv3bWYS1) +* [https://cloudflare-ipfs.com/ipfs/QmUFJmQRosK4Amzcjwbip8kV3gkJ8jqCURjCNxuv3bWYS1](https://cloudflare-ipfs.com/ipfs/QmUFJmQRosK4Amzcjwbip8kV3gkJ8jqCURjCNxuv3bWYS1) + +Both gateways successfully display the file content after a short propagation delay. + +--- + +### 6. Analysis: IPFS Content Addressing vs Traditional URLs + +Traditional (Web 2.0) URLs use **location-based addressing** — they point to *where* the content is hosted (domain, server, path). 
+This creates issues such as: + +* single point of failure +* dependency on hosting provider +* possibility of link rot +* content may change at the same URL + +IPFS uses **content-based addressing**, where the CID is a cryptographic hash of the data. This means: + +* the address depends on *what* the content is, not *where* it is stored +* content is verifiable — any modification changes the CID +* the content can be served by any IPFS node that stores it +* eliminates single points of failure + +This provides stronger guarantees of integrity, immutability, and decentralization. + +--- + +### 7. Reflection: Advantages and Disadvantages of Decentralized Storage + +#### ✔ Advantages + +* **High resilience:** Data is available as long as *one node* in the network has it. +* **Censorship resistance:** No central authority controls hosting. +* **Integrity:** Content cannot be tampered with without changing the CID. +* **Distributed performance:** Content can be fetched from the nearest peer. + +#### ✘ Disadvantages + +* **Propagation delays:** Public gateways may take several minutes to fetch new content. +* **Mutable updates are harder:** Any file change produces a new CID. +* **Availability depends on pinning:** Unpinned content can be garbage-collected. +* **Less user-friendly:** CIDs are long and not human-readable. + +--- + +# Task 2 — Static Site Deployment with 4EVERLAND + +### 1. 4EVERLAND Project URL + +The deployed static website is available at: + +``` +https://f25-devops-intro-dzimbzxm-belyakova-anna.ipfs.4everland.app/ +``` + +This URL points to the IPFS-hosted version of the site served through 4EVERLAND's infrastructure. + +--- + +### 2. 
GitHub Repository Used + +The GitHub repository connected to the 4EVERLAND deployment pipeline: + +``` +https://github.com/belyakova-anna/F25-DevOps-Intro +``` + +This repository contains the static site located in `labs/lab11/app`, which is automatically deployed whenever changes are pushed to the selected branch (`main`). + +--- + +### 3. IPFS CID from 4EVERLAND Dashboard + +During deployment, 4EVERLAND generated and pinned the following IPFS CID for the website: + +``` +bafybeifhpby7u3zpsa2ywhwh5ckn2gsdsniygytew6jxok3nvkiq5t3v3e +``` + +This CID uniquely identifies the deployed static site content. + +--- + +### 4. Deployment Dashboard Screenshot + +4EVERLAND deployment overview (status, CID, commit, duration): + +![deployment dashboard](https://github.com/user-attachments/assets/d1f5db3b-ea17-481e-89c5-d3c931f094d1) + +--- + +### 5. Site Accessed Through 4EVERLAND Domain + +The site renders successfully using the `.4everland.app` gateway: + +![site accessed through 4everland](https://github.com/user-attachments/assets/dfa7fa1a-e5bf-4cf6-9ddd-3fa7354884f0) + +--- + +### 6. Site Accessed Through a Public IPFS Gateway + +The same site was accessed directly using the public `ipfs.io` gateway and the deployment CID: + +``` +https://ipfs.io/ipfs/bafybeifhpby7u3zpsa2ywhwh5ckn2gsdsniygytew6jxok3nvkiq5t3v3e +``` + +![site accessed through ipfs](https://github.com/user-attachments/assets/eb744175-0731-4d1f-b588-ee1a2a69b2be) + +--- + +### 7. 
Analysis: How 4EVERLAND Simplifies IPFS Deployment + +Deploying directly to IPFS normally requires: + +* running an IPFS node +* pinning content +* managing updates manually +* distributing CIDs and dealing with versioning + +4EVERLAND automates all of this: + +✔ integrates with GitHub for CI/CD + +✔ automatically builds and publishes content to IPFS + +✔ provides stable domain routing (`*.4everland.app`) + +✔ handles pinning and persistence + +✔ regenerates CID on each deployment + +✔ abstracts away the IPFS API and gateway interactions + +This makes decentralized hosting accessible even to beginners and removes operational overhead. + +--- + +### 8. Comparison: Traditional Hosting vs IPFS Hosting + +| Feature | Traditional Web Hosting | IPFS / 4EVERLAND Hosting | +| ----------- | ------------------------------------ | --------------------------------- | +| Addressing | Location-based URL (server-specific) | Content-addressed (CID) | +| Reliability | Single point of failure | Redundant, pulled from many peers | +| Mutability | Easy to change content | Changes create new CIDs | +| Deployment | Centralized server pushes | Decentralized IPFS publishing | +| Censorship | Content can be restricted | Highly censorship-resistant | +| Performance | Depends on server location | Can fetch from closest peer | + +**IPFS trade-offs:** + +✔ strong integrity, decentralization, resilience + +✘ slower propagation, versioning complexity, gateway inconsistencies diff --git a/labs/submission2.md b/labs/submission2.md new file mode 100644 index 00000000..a196be36 --- /dev/null +++ b/labs/submission2.md @@ -0,0 +1,386 @@ +# Lab 2 — Version Control & Advanced Git + +## Task 1 — Git Object Model Exploration + +### Blob object + +Command: +```bash +git cat-file -p d329320010da9e34850f76bf7c2954aac19d9377 +``` + +Output: +``` +Signing commits (with GPG, SSH, or S/MIME) helps to: + +1. **Prove authorship** – ensures the commit truly comes from the person who owns the signing key. + +2. 
**Protect integrity** – prevents undetected tampering with commit history, as the signature is cryptographically verifiable. + +3. **Build trust** – signed commits are marked as Verified on GitHub, making code review and collaboration more secure. + +4. **Support auditing and compliance** – even if a key is rotated or revoked later, the verification record persists, ensuring stable contribution history. +``` +Description: + +A blob stores the raw file contents without metadata such as filename or permissions. + +### Tree object +Command: +``` +git cat-file -p e7eae0fda04b129164a49683809b1a321c12779a +``` + +Output: +```bash +100644 blob 4db373667a50f14a411bb5c7e879690fd08aacc1 README.md +040000 tree 33cd41e211a69f89ca99df6bbf00fda169f2f0da labs +040000 tree 1c31e5c1fc7c98b6fd92659e55e0c03a46229c75 lectures +``` + +Description: + +A tree represents a directory: it links filenames to their corresponding blobs (files) or subtrees (subdirectories). + +### Commit object + +Command: +```bash +git cat-file -p 37c84dcc3d1e14c0473996fa48fb97a79b07b3d7 +``` + +Output: +```bash +tree e7eae0fda04b129164a49683809b1a321c12779a +parent 6c9d08c2101c1ecbe4a03ccd7aaa65d73d5cb78f +author belyakova-anna 1757775358 +0300 +committer belyakova-anna 1757775358 +0300 +gpgsig -----BEGIN SSH SIGNATURE----- + U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgjtPuFdA6G+Y+uAJ/Y1al4WVSE5 + uDORTebA+oc0gOzk4AAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5 + AAAAQFCYz3h1jntL0CX3oPr9FxeMWVqsDKHsI68H8/T5Dop6B6ORtZpxAgwXUkwtKe96Ws + YbiTyi16do8Aak8ctNYw4= + -----END SSH SIGNATURE----- +``` + +Description: + +A commit points to a tree (project snapshot) and contains metadata like parent commit(s), author, committer, signature, and the commit message. 
+ +## Task 2 — Reset and Reflog Recovery + +### Commands and Purpose +```bash +# Create a practice branch for reset experiments +git switch -c git-reset-practice + +# Create three commits +echo "First commit" > file.txt && git add file.txt && git commit -m "First commit" +echo "Second commit" >> file.txt && git add file.txt && git commit -m "Second commit" +echo "Third commit" >> file.txt && git add file.txt && git commit -m "Third commit" + +# View commit history +git log --oneline + +# Soft reset: move HEAD back 1 commit, keep index & working tree +git reset --soft HEAD~1 +git status +git log --oneline + +# Hard reset using reflog: recover previous HEAD state +git reset --hard HEAD@{1} + +# Hard reset to one commit before HEAD +git reset --hard HEAD~1 +git status +git log --oneline + +# View reflog to track HEAD movement +git reflog + +# Reset to a specific commit (by hash) to fully restore that state +git reset --hard 37c84dc +git log --oneline +git status +``` + +### Snippets of git log --oneline + +After creating three commits: + +```bash +f452dbe (HEAD -> git-reset-practice) Third commit +6761636 Second commit +b4a1b11 First commit +``` + +After soft reset (HEAD~1): +```bash +6761636 (HEAD -> git-reset-practice) Second commit +b4a1b11 First commit +``` + +After hard reset to reflog (HEAD@{1}): +```bash +f452dbe (HEAD -> git-reset-practice) Third commit +6761636 Second commit +b4a1b11 First commit +``` + +After hard reset to specific commit (37c84dc): +```bash +37c84dc (HEAD -> git-reset-practice, origin/feature/lab2, feature/lab2) feat: delete Task 1.txt +6c9d08c feat: add Task 1.txt +b4a1b11 First commit +``` + +### Snippet of git reflog +```bash +6761636 (HEAD -> git-reset-practice) HEAD@{0}: reset: moving to HEAD~1 +f452dbe HEAD@{1}: reset: moving to HEAD@{1} +6761636 HEAD@{2}: reset: moving to HEAD~1 +f452dbe HEAD@{3}: commit: Third commit +6761636 HEAD@{4}: commit: Second commit +b4a1b11 HEAD@{5}: commit: First commit +... 
+37c84dc (origin/feature/lab2, feature/lab2) HEAD@{6}: checkout: moving from feature/lab2 to git-reset-practice +``` + +### Changes in Working Tree, Index, and History +| Reset Type | Working Tree | Index (Staging) | History (HEAD) | +|-----------------------|------------------------------------|--------------------------------|-----------------------------------------| +| `--soft HEAD~1` | unchanged | unchanged | HEAD moved back 1 commit | +| `--hard HEAD@{1}` | restored to state of previous commit| matches working tree | HEAD restored to previous commit | +| `--hard HEAD~1` | file updated to previous commit | matches working tree | HEAD moved back 1 commit | +| `--hard ` | files match the specified commit | matches working tree | HEAD set to specified commit | + + +## Task 3 — Visualize Commit History + +### A snippet of the graph. + +```bash +* 1913eaa (side-branch) Side branch commit +* 56b3b04 (HEAD -> main, origin/main, origin/HEAD) docs: add PR template +| * 37c84dc (origin/feature/lab2, git-reset-practice, feature/lab2) feat: delete Task 1.txt +| * 6c9d08c feat: add Task 1.txt +| * 1ecadc6 (origin/feature/lab1, feature/lab1) docs: add lab1 submission stub +| * c9b38e2 chore: configure ssh +| * 7688a20 docs: add commit signing summary +|/ +* af0da89 feat: update lab1 +* 74a8c27 Publish lab1 +* f0485c0 Publish lec1 +* 31dd11b Publish README.md +``` + +### Commit messages list + +- Side branch commit +- docs: add PR template +- feat: delete Task 1.txt +- feat: add Task 1.txt +- docs: add lab1 submission stub +- chore: configure ssh +- docs: add commit signing summary +- feat: update lab1 +- Publish lab1 +- Publish lec1 +- Publish README.md + +### Reflection +The graph shows the branching structure of the repository, making it easy to see where side branches diverged and how commits are related. +This helps understand the project history and the relationship between branches. 
+ +## Task 4 — Tagging Commits + +### Commands Used +``` +# Switch to the lab branch +git switch feature/lab2 + +# Create a lightweight tag for the latest commit +git tag v1.0.0 + +# Verify the tag and see associated commit +git show v1.0.0 + +# Push the tag to the remote repository +git push origin v1.0.0 +``` + +### Tag Name and Associated Commit + +- Tag: v1.0.0 +- Commit Hash: 37c84dcc3d1e14c0473996fa48fb97a79b07b3d7 +- Commit Message: feat: delete Task 1.txt + +### Note on Tags + +Tags are used to mark specific points in history as releases. They are important for versioning, triggering CI/CD pipelines, and generating release notes, ensuring a clear reference to stable or released code. + +## Task 5 — git switch vs git checkout vs git restore + +### Branch Switching with `git switch` +Command: +```bash +git switch feature/lab2 +``` +Output: +``` +Already on 'feature/lab2' +Your branch is up to date with 'origin/feature/lab2'. +``` + +- Switches to the feature/lab2 branch (already on it). + +Command: +```bash +git switch -c cmd-compare +``` +Output: +``` +Switched to a new branch 'cmd-compare' +``` + +- Creates a new branch cmd-compare and switches to it. + +Command: +```bash +git switch - +``` +Output: +``` +Switched to branch 'feature/lab2' +Your branch is up to date with 'origin/feature/lab2'. +``` + +- Switches back to the previous branch (feature/lab2). + +Command: +```bash +git branch +``` +Output: +```bash + cmd-compare + feature/lab1 +* feature/lab2 + git-reset-practice + main + side-branch +``` + +- Shows all branches and indicates the current branch with *. + +### Branch Creation with Legacy `git checkout` + +Command: +```bash +git checkout -b cmd-compare-2 +``` +Output: +``` +Switched to a new branch 'cmd-compare-2' +``` + +- Creates and switches to a new branch (cmd-compare-2) using the legacy git checkout command. 
+ +Command: +```bash +git branch +``` +Output: +```bash + cmd-compare +* cmd-compare-2 + feature/lab1 + feature/lab2 + git-reset-practice + main + side-branch +``` + +- Confirms that cmd-compare-2 is now the current branch. + +### Working with Files Using `git restore` +Command: +```bash +git add demo.txt +echo "scratch" >> demo.txt +git status +``` +Output: +``` +On branch cmd-compare-2 +Changes to be committed: + new file: demo.txt + +Changes not staged for commit: + modified: demo.txt +``` + +- Adds demo.txt to staging, then appends text to it. Status shows staged changes (new file) and unstaged modifications (new content). + +Command: +```bash +git restore demo.txt +git status +``` +Output: +``` +On branch cmd-compare-2 +Changes to be committed: + new file: demo.txt +``` + +- Discards changes in the working directory (unstaged changes). Staging area remains unchanged. + +Command: +```bash +git restore --staged demo.txt +git status +``` +Output: +``` +On branch cmd-compare-2 +Untracked files: + demo.txt +``` + +- Removes the file from staging area. The file is now untracked. + +Command: +```bash +git add demo.txt +git restore --source=HEAD~1 demo.txt +git status +``` +Output: +``` +On branch cmd-compare-2 +Changes to be committed: + new file: demo.txt + +Changes not staged for commit: + deleted: demo.txt +``` + +- Restores the file to the state from the previous commit (HEAD~1). + +- Since demo.txt did not exist in that commit, Git shows it as deleted in the working directory. + +### Summary of Differences + +| Command | Purpose / Effect | +|---------------------------------|-----------------| +| `git switch` | Modern, clear way to create and switch branches. | +| `git checkout -b` | Legacy way to create + switch branches. Can also restore files (less clear). | +| `git restore ` | Discards changes in working directory for tracked files. | +| `git restore --staged ` | Removes files from staging area without touching working directory. 
| +| `git restore --source=<commit> <file>` | Restores file from a specific commit. Deletes file if it didn’t exist in that commit. | + + +## Bonus — GitHub Social Interactions + +Starring repositories helps show appreciation for valuable projects and makes it easier to find them later. Following people allows you to keep up with their contributions and updates, fostering collaboration and community awareness in open source and team projects. \ No newline at end of file diff --git a/labs/submission3.md b/labs/submission3.md new file mode 100644 index 00000000..431c489c --- /dev/null +++ b/labs/submission3.md @@ -0,0 +1,170 @@ +# Lab 3 — Submission + +## Task 1 — First GitHub Actions Workflow + +### Run link +- [Successful workflow run](https://github.com/belyakova-anna/F25-DevOps-Intro/actions/runs/17896049687) + +### Screenshots +- Workflow run list +![Workflow run list](https://github.com/user-attachments/assets/74d5d767-a089-4d07-8b43-be28e29427d4) +- Step log output +![Step log output](https://github.com/user-attachments/assets/73e61527-119c-4b73-8fb1-f22274437aef) + +### Steps I followed + +1. **Create a feature branch locally** + +``` +git switch -c feature/lab3 +``` + +2. **Create the workflow file locally** +``` +mkdir .github\workflows +echo. > .github\workflows\github-actions-demo.yml +``` + +4. **Paste the Quickstart YML content to `github-actions-demo.yml`** +```yaml +name: GitHub Actions Demo +run-name: ${{ github.actor }} is testing out GitHub Actions 🚀 +on: [push] +jobs: + Explore-GitHub-Actions: + runs-on: ubuntu-latest + steps: + - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event." + - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" + - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." 
+ - name: Check out repository code + uses: actions/checkout@v5 + - run: echo "💡 The ${{ github.repository }} repository has been cloned to the runner." + - run: echo "🖥️ The workflow is now ready to test your code on the runner." + - name: List files in the repository + run: | + ls ${{ github.workspace }} + - run: echo "🍏 This job's status is ${{ job.status }}." +``` + +5. **Commit locally** + +``` +git add .github/workflows/github-actions-demo.yml +git commit -m "feat: add workflow" +``` + +6. **Push the branch to GitHub (this push triggers the workflow)** + +``` +git push -u origin feature/lab3 +``` + +7. **Verify the run on GitHub** + +Open repository → Actions tab → Workflow: GitHub Actions Demo → latest run. + +### Key concepts learned +- **Jobs** are groups of steps executed on a single runner. +- **Steps** are the individual commands or actions inside a job. +- **Runners** are virtual machines provided by GitHub (in this case `ubuntu-latest`) where jobs run. +- **Triggers** define when workflows start. Here, the trigger was `on: push`. + +### What caused the run? +The run was automatically triggered by a **push event** when I committed and pushed changes to the repository. + +### Execution analysis +1. GitHub Actions created a new Ubuntu-based runner (`ubuntu-latest`). +2. It executed the steps in order: + - Printed messages with `echo`. + - Checked out the repository code with `actions/checkout`. + - Listed repository files using `ls`. +3. Each step was logged and could be expanded to see detailed output. +4. The workflow finished successfully, confirming the workflow configuration works as expected + +## Task 2 — Manual Trigger + System Information + +### Changes made to the workflow +1. **Enabled manual runs alongside push-triggered runs:** + ```yaml + on: + push: + workflow_dispatch: + ``` +2. 
**Added steps to collect runner system information:** + ```yaml + - name: CPU info + run: lscpu + - name: Memory info + run: free -h + - name: Disk info + run: df -h + - name: Environment variables + run: env | sort + ``` + +### Manual dispatch test + +The Run workflow button became visible after the workflow file was present on the main branch. +![Workflow button](https://github.com/user-attachments/assets/2798e136-bf39-403b-b459-c5f84742e83d) + +I manually dispatched the workflow from the GitHub UI: Actions → GitHub Actions Demo → Run workflow → Branch: main. + +Manual run link: [here](https://github.com/belyakova-anna/F25-DevOps-Intro/actions/runs/17897676883) + +![Jobs](https://github.com/user-attachments/assets/d9e39c12-6b8e-400e-8be7-f17bf035f925) + +### Gathered system information (from logs) + +**CPU** +``` +Architecture: x86_64 +CPU(s): 4 (Threads per core: 2; Cores per socket: 2; Sockets: 1) +Vendor ID: AuthenticAMD +Model name: AMD EPYC 7763 64-Core Processor +Hypervisor: Microsoft (Virtualization type: full) +``` + +**Memory** +``` +Mem: 15Gi total, 754Mi used, 13Gi free, 1.5Gi buff/cache, 14Gi available +Swap: 4.0Gi total, 0B used +``` + +**Disk** +``` +/dev/root 72G size, 46G used, 27G avail (64%) mounted at / +/dev/sda1 74G size, 4.1G used, 66G avail mounted at /mnt +``` + +**Environment (selected)** +``` +RUNNER_OS=Linux +ImageOS=ubuntu24 +ImageVersion=20250907.24.1 +GITHUB_EVENT_NAME=workflow_dispatch +GITHUB_REF_NAME=main +GITHUB_WORKFLOW=GitHub Actions Demo +GITHUB_WORKSPACE=/home/runner/work/F25-DevOps-Intro/F25-DevOps-Intro +``` + +### Comparison: manual vs automatic triggers + +- Automatic (`on: push`)\ + Runs when I push commits. In the env, `GITHUB_EVENT_NAME=push`. + +- Manual (`workflow_dispatch`)\ + Runs only when I click Run workflow in the Actions tab. In the env, `GITHUB_EVENT_NAME=workflow_dispatch`. + Useful for re-running without new commits or for ad-hoc checks. 
+ +### Runner environment and capabilities + +- **OS:** Ubuntu (Image: ubuntu24, ImageVersion=20250907.24.1) + +- **CPU:** 4 vCPUs (AMD EPYC 7763 under virtualization) + +- **Memory:** 15 GiB total, ~14 GiB available at start + +- **Disk:** Root FS 72 GB (27 GB free), additional mount at /mnt 74 GB (66 GB free) + +- **Capabilities:** Standard Linux userland tools preinstalled; GitHub Actions environment variables expose repo, ref, and run context. \ No newline at end of file diff --git a/labs/submission4.md b/labs/submission4.md new file mode 100644 index 00000000..c1da683e --- /dev/null +++ b/labs/submission4.md @@ -0,0 +1,837 @@ +# Lab 4 — Operating Systems & Networking + +## Task 1 — Operating System Analysis + +### Task 1.1 — Boot Performance Analysis + +**1. Analyze System Boot Time** + +```bash +anna@annaThinkBook ~> systemd-analyze +Startup finished in 4.286s (firmware) + 13.350s (loader) + 1.496s (kernel) + 9.942s (userspace) = 29.076s +graphical.target reached after 9.915s in userspace. +``` + +Observation: + +The total boot time is ~29 seconds. The slowest stages are firmware and loader (~18 seconds combined). The kernel loads quickly (~1.5s), and the graphical interface becomes ready in ~10s. 
+ +```bash +anna@annaThinkBook ~> systemd-analyze blame +4min 37.161s snapd.service + 38.785s dev-loop13.device + 36.317s dev-loop18.device + 30.169s dev-loop3.device + 17.868s fstrim.service + 6.138s dev-loop17.device + 5.871s NetworkManager-wait-online.service + 2.566s plymouth-quit-wait.service + 1.805s man-db.service + 1.297s snapd.seeded.service + 890ms systemd-backlight@backlight:amdgpu_bl1.service + 858ms NetworkManager.service + 781ms docker.service + 779ms boot-efi.mount + 730ms fwupd.service + 261ms dev-nvme0n1p5.device + 220ms upower.service + 204ms apport.service + 167ms snapd.apparmor.service + 154ms user@1000.service + 149ms systemd-udev-trigger.service + 144ms udisks2.service + 131ms containerd.service + 127ms dev-loop6.device + 123ms dev-loop1.device + 115ms secureboot-db.service + 114ms dev-loop7.device + 110ms dev-loop5.device + 107ms dev-loop4.device + 106ms gnome-remote-desktop.service + 100ms power-profiles-daemon.service + 98ms polkit.service + 94ms accounts-daemon.service + 93ms gpu-manager.service + 90ms dev-loop2.device + 87ms systemd-journal-flush.service + 83ms dev-loop0.device + 75ms rsyslog.service + 69ms gdm.service + 65ms plymouth-start.service + 64ms update-notifier-download.service + 64ms systemd-fsck@dev-disk-by\x2duuid-78F2\x2dE94E.service + 63ms systemd-journald.service + 60ms systemd-resolved.service + 57ms ModemManager.service + 55ms apparmor.service + 53ms setvtrgb.service + 52ms systemd-tmpfiles-setup.service + 50ms dev-loop10.device + 49ms dev-loop8.device + 49ms dev-loop11.device + 48ms dev-loop9.device + 46ms dev-loop16.device + 46ms dev-loop15.device + 46ms dev-loop14.device + 46ms dev-loop12.device + 45ms avahi-daemon.service + 45ms systemd-udevd.service + 44ms bluetooth.service + 41ms grub-common.service + 39ms dbus.service + 35ms systemd-sysctl.service + 33ms bolt.service + 32ms systemd-logind.service + 32ms systemd-oomd.service + 31ms keyboard-setup.service + 31ms systemd-random-seed.service + 30ms 
systemd-timesyncd.service + 29ms fwupd-refresh.service + 28ms colord.service + 27ms systemd-tmpfiles-clean.service + 27ms switcheroo-control.service + 25ms systemd-remount-fs.service + 24ms snap-bare-5.mount + 24ms cups.service + 23ms snap-code-196.mount + 23ms snap-core20-2599.mount + 22ms systemd-modules-load.service + 22ms e2scrub_reap.service + 21ms snap-core22-1981.mount + 21ms grub-initrd-fallback.service + 20ms snap-core24-988.mount + 20ms thermald.service + 20ms snap-firefox-5751.mount + 19ms docker.socket + 19ms snap-firefox-6316.mount + 18ms snap-firmware\x2dupdater-167.mount + 17ms snap-gnome\x2d42\x2d2204-202.mount + 17ms rtkit-daemon.service + 17ms snap-gtk\x2dcommon\x2dthemes-1535.mount + 16ms dev-loop19.device + 16ms snap-snap\x2dstore-1248.mount + 16ms systemd-binfmt.service + 15ms plymouth-read-write.service + 15ms snap-snap\x2dstore-1270.mount + 15ms iio-sensor-proxy.service + 15ms dev-loop25.device + 14ms dev-loop22.device + 14ms dev-hugepages.mount + 13ms dev-loop24.device + 13ms kerneloops.service + 13ms wpa_supplicant.service + 13ms dev-mqueue.mount + 12ms snap-snapd-24505.mount + 12ms sys-kernel-debug.mount + 12ms systemd-tmpfiles-setup-dev-early.service + 11ms snap-snapd\x2ddesktop\x2dintegration-253.mount + 11ms sys-kernel-tracing.mount + 11ms sysstat.service + 10ms flatpak-system-helper.service + 10ms snap-telegram\x2ddesktop-6691.mount + 9ms alsa-restore.service + 9ms snap-chromium-3251.mount + 8ms kmod-static-nodes.service + 8ms snap-gnome\x2d46\x2d2404-125.mount + 8ms snap-gnome\x2d42\x2d2204-226.mount + 8ms systemd-backlight@leds:platform::kbd_backlight.service + 8ms snap-mesa\x2d2404-912.mount + 8ms modprobe@configfs.service + 7ms proc-sys-fs-binfmt_misc.mount + 7ms swap.img.swap + 7ms snap-cups-1100.mount + 7ms systemd-rfkill.service + 7ms dev-loop20.device + 7ms snap-telegram\x2ddesktop-6798.mount + 7ms modprobe@drm.service + 7ms snap-core24-1151.mount + 7ms systemd-tmpfiles-setup-dev.service + 6ms snap-core22-2133.mount + 6ms 
snap-snapd\x2ddesktop\x2dintegration-315.mount + 6ms snap-core20-2669.mount + 6ms systemd-update-utmp.service + 6ms snap-snapd-25202.mount + 6ms modprobe@fuse.service + 5ms user-runtime-dir@1000.service + 5ms console-setup.service + 5ms systemd-user-sessions.service + 4ms sysstat-collect.service + 4ms systemd-update-utmp-runlevel.service + 4ms ufw.service + 4ms openvpn.service + 3ms sys-fs-fuse-connections.mount + 3ms modprobe@efi_pstore.service + 3ms modprobe@loop.service + 3ms sys-kernel-config.mount + 3ms dev-loop21.device + 3ms dev-loop23.device + 2ms modprobe@dm_mod.service + 973us snapd.socket +``` +Observation: + +- The largest delay is caused by `snapd.service` (~4.5 minutes). +- `dev-loop*` devices (snap packages) also add noticeable delays. +- `NetworkManager-wait-online.service` adds ~6 seconds. +- Most other services load in under 1 second. + +**2. Check System Load:** +```bash +anna@annaThinkBook ~> uptime + 22:37:34 up 35 min, 1 user, load average: 0,68, 0,49, 0,47 +``` + +```bash +anna@annaThinkBook ~> w + 22:37:36 up 35 min, 1 user, load average: 0,68, 0,49, 0,47 +USER TTY FROM LOGIN@ IDLE JCPU PCPU WHAT +anna tty2 - 22:02 30:19 0.00s ? /usr/libexec/gnome-session-binary --session=ubuntu +``` + +Observation: + +The system load is very low (load average < 1). One active user (`anna`) is logged into a GNOME session. + +### Task 1.2 — Process Forensics + +**1. 
Identify Resource-Intensive Processes** + +```bash +anna@annaThinkBook ~> ps -eo pid,ppid,cmd,%mem,%cpu --sort=-%mem | head -n 6 + PID PPID CMD %MEM %CPU + 3142 2801 /usr/bin/gnome-software --g 2.6 1.9 + 15093 2855 user-app1 2.3 0.8 + 2855 2587 /usr/bin/gnome-shell 1.7 3.1 + 12227 2587 user-app2 1.2 1.8 + 5275 2855 user-app3 1.2 0.1 +``` + +```bash +anna@annaThinkBook ~> ps -eo pid,ppid,cmd,%mem,%cpu --sort=-%cpu | head -n 6 + PID PPID CMD %MEM %CPU + 2855 2587 /usr/bin/gnome-shell 1.7 3.1 + 16073 15973 user-app4 1.2 2.7 + 3142 2801 /usr/bin/gnome-software --g 2.6 1.9 + 12227 2587 user-app2 1.2 1.8 + 722 2 [irq/93-rtw89_pci] 0.0 1.7 +``` + +Observation: + +- The most memory-hungry process is `gnome-software` (2.6% MEM). +- The top CPU consumer is `gnome-shell` (~3.1% CPU). +- Other notable consumers are `user-app1`, `user-app2`, `user-app3`, and `user-app4`. + +Answer: + +👉 The top memory-consuming process is `gnome-software`. + +### Task 1.3 — Service Dependencies + +**1. Map Service Relationships** + +```bash +anna@annaThinkBook ~> systemctl list-dependencies +default.target +● ├─accounts-daemon.service +● ├─gdm.service +● ├─gnome-remote-desktop.service +● ├─power-profiles-daemon.service +● ├─switcheroo-control.service +○ ├─systemd-update-utmp-runlevel.service +● ├─udisks2.service +● └─multi-user.target +○ ├─anacron.service +● ├─apport.service +● ├─avahi-daemon.service +● ├─console-setup.service +● ├─containerd.service +● ├─cron.service +● ├─cups-browsed.service +● ├─cups.path +● ├─cups.service +● ├─dbus.service +○ ├─dmesg.service +● ├─docker.service +○ ├─e2scrub_reap.service +○ ├─grub-common.service +○ ├─grub-initrd-fallback.service +● ├─kerneloops.service +● ├─ModemManager.service +○ ├─networkd-dispatcher.service +● ├─NetworkManager.service +● ├─openvpn.service +● ├─plymouth-quit-wait.service +○ ├─plymouth-quit.service +● ├─rsyslog.service +○ ├─secureboot-db.service +● ├─snap-bare-5.mount +● ├─snap-chromium-3251.mount +● ├─snap-code-196.mount +● 
├─snap-core20-2599.mount +● ├─snap-core20-2669.mount +● ├─snap-core22-1981.mount +● ├─snap-core22-2133.mount +● ├─snap-core24-1151.mount +● ├─snap-core24-988.mount +● ├─snap-cups-1100.mount +● ├─snap-firefox-5751.mount +● ├─snap-firefox-6316.mount +● ├─snap-firmware\x2dupdater-167.mount +● ├─snap-gnome\x2d42\x2d2204-202.mount +● ├─snap-gnome\x2d42\x2d2204-226.mount +● ├─snap-gtk\x2dcommon\x2dthemes-1535.mount +● ├─snap-mesa\x2d2404-912.mount +● ├─snap-snap\x2dstore-1248.mount +● ├─snap-snap\x2dstore-1270.mount +● ├─snap-snapd-24505.mount +● ├─snap-snapd-25202.mount +● ├─snap-snapd\x2ddesktop\x2dintegration-253.mount +● ├─snap-snapd\x2ddesktop\x2dintegration-315.mount +● ├─snap-telegram\x2ddesktop-6691.mount +● ├─snap-telegram\x2ddesktop-6798.mount +● ├─snap.cups.cups-browsed.service +● ├─snap.cups.cupsd.service +● ├─snapd.apparmor.service +○ ├─snapd.autoimport.service +○ ├─snapd.core-fixup.service +○ ├─snapd.recovery-chooser-trigger.service +● ├─snapd.seeded.service +● ├─snapd.service +○ ├─ssl-cert.service +○ ├─sssd.service +● ├─sysstat.service +● ├─systemd-ask-password-wall.path +● ├─systemd-logind.service +● ├─systemd-oomd.service +○ ├─systemd-update-utmp-runlevel.service +● ├─systemd-user-sessions.service +○ ├─thermald.service +○ ├─ua-reboot-cmds.service +○ ├─ubuntu-advantage.service +● ├─ufw.service +● ├─unattended-upgrades.service +● ├─whoopsie.path +● ├─wpa_supplicant.service +● ├─basic.target +● │ ├─-.mount +○ │ ├─tmp.mount +● │ ├─paths.target +○ │ │ ├─apport-autoreport.path +○ │ │ └─tpm-udev.path +● │ ├─slices.target +● │ │ ├─-.slice +● │ │ └─system.slice +● │ ├─sockets.target +○ │ │ ├─apport-forward.socket +● │ │ ├─avahi-daemon.socket +● │ │ ├─cups.socket +● │ │ ├─dbus.socket +● │ │ ├─docker.socket +● │ │ ├─snapd.socket +● │ │ ├─systemd-initctl.socket +● │ │ ├─systemd-journald-dev-log.socket +● │ │ ├─systemd-journald.socket +● │ │ ├─systemd-oomd.socket +○ │ │ ├─systemd-pcrextend.socket +● │ │ ├─systemd-sysext.socket +● │ │ ├─systemd-udevd-control.socket +● 
│ │ ├─systemd-udevd-kernel.socket +● │ │ └─uuidd.socket +● │ ├─sysinit.target +● │ │ ├─apparmor.service +● │ │ ├─dev-hugepages.mount +● │ │ ├─dev-mqueue.mount +● │ │ ├─keyboard-setup.service +● │ │ ├─kmod-static-nodes.service +○ │ │ ├─ldconfig.service +● │ │ ├─plymouth-read-write.service +● │ │ ├─plymouth-start.service +● │ │ ├─proc-sys-fs-binfmt_misc.automount +● │ │ ├─setvtrgb.service +● │ │ ├─sys-fs-fuse-connections.mount +● │ │ ├─sys-kernel-config.mount +● │ │ ├─sys-kernel-debug.mount +● │ │ ├─sys-kernel-tracing.mount +○ │ │ ├─systemd-ask-password-console.path +● │ │ ├─systemd-binfmt.service +○ │ │ ├─systemd-firstboot.service +○ │ │ ├─systemd-hwdb-update.service +○ │ │ ├─systemd-journal-catalog-update.service +● │ │ ├─systemd-journal-flush.service +● │ │ ├─systemd-journald.service +○ │ │ ├─systemd-machine-id-commit.service +● │ │ ├─systemd-modules-load.service +○ │ │ ├─systemd-pcrmachine.service +○ │ │ ├─systemd-pcrphase-sysinit.service +○ │ │ ├─systemd-pcrphase.service +○ │ │ ├─systemd-pstore.service +● │ │ ├─systemd-random-seed.service +○ │ │ ├─systemd-repart.service +● │ │ ├─systemd-resolved.service +● │ │ ├─systemd-sysctl.service +○ │ │ ├─systemd-sysusers.service +● │ │ ├─systemd-timesyncd.service +● │ │ ├─systemd-tmpfiles-setup-dev-early.service +● │ │ ├─systemd-tmpfiles-setup-dev.service +● │ │ ├─systemd-tmpfiles-setup.service +○ │ │ ├─systemd-tpm2-setup-early.service +○ │ │ ├─systemd-tpm2-setup.service +● │ │ ├─systemd-udev-trigger.service +● │ │ ├─systemd-udevd.service +○ │ │ ├─systemd-update-done.service +● │ │ ├─systemd-update-utmp.service +● │ │ ├─cryptsetup.target +● │ │ ├─integritysetup.target +● │ │ ├─local-fs.target +● │ │ │ ├─-.mount +● │ │ │ ├─boot-efi.mount +○ │ │ │ ├─systemd-fsck-root.service +● │ │ │ └─systemd-remount-fs.service +● │ │ ├─swap.target +● │ │ │ └─swap.img.swap +● │ │ └─veritysetup.target +● │ └─timers.target +● │ ├─anacron.timer +○ │ ├─apport-autoreport.timer +● │ ├─apt-daily-upgrade.timer +● │ ├─apt-daily.timer +● │ 
├─dpkg-db-backup.timer +● │ ├─e2scrub_all.timer +● │ ├─fstrim.timer +● │ ├─fwupd-refresh.timer +● │ ├─logrotate.timer +● │ ├─man-db.timer +● │ ├─motd-news.timer +○ │ ├─snapd.snap-repair.timer +● │ ├─systemd-tmpfiles-clean.timer +○ │ ├─ua-timer.timer +● │ ├─update-notifier-download.timer +● │ └─update-notifier-motd.timer +● ├─getty.target +○ │ ├─getty-static.service +○ │ └─getty@tty1.service +● └─remote-fs.target +``` + +```bash +anna@annaThinkBook ~> systemctl list-dependencies multi-user.target +multi-user.target +○ ├─anacron.service +● ├─apport.service +● ├─avahi-daemon.service +● ├─console-setup.service +● ├─containerd.service +● ├─cron.service +● ├─cups-browsed.service +● ├─cups.path +● ├─cups.service +● ├─dbus.service +○ ├─dmesg.service +● ├─docker.service +○ ├─e2scrub_reap.service +○ ├─grub-common.service +○ ├─grub-initrd-fallback.service +● ├─kerneloops.service +● ├─ModemManager.service +○ ├─networkd-dispatcher.service +● ├─NetworkManager.service +● ├─openvpn.service +● ├─plymouth-quit-wait.service +○ ├─plymouth-quit.service +● ├─rsyslog.service +○ ├─secureboot-db.service +● ├─snap-bare-5.mount +● ├─snap-chromium-3251.mount +● ├─snap-code-196.mount +● ├─snap-core20-2599.mount +● ├─snap-core20-2669.mount +● ├─snap-core22-1981.mount +● ├─snap-core22-2133.mount +● ├─snap-core24-1151.mount +● ├─snap-core24-988.mount +● ├─snap-cups-1100.mount +● ├─snap-firefox-5751.mount +● ├─snap-firefox-6316.mount +● ├─snap-firmware\x2dupdater-167.mount +● ├─snap-gnome\x2d42\x2d2204-202.mount +● ├─snap-gnome\x2d42\x2d2204-226.mount +● ├─snap-gtk\x2dcommon\x2dthemes-1535.mount +● ├─snap-mesa\x2d2404-912.mount +● ├─snap-snap\x2dstore-1248.mount +● ├─snap-snap\x2dstore-1270.mount +● ├─snap-snapd-24505.mount +● ├─snap-snapd-25202.mount +● ├─snap-snapd\x2ddesktop\x2dintegration-253.mount +● ├─snap-snapd\x2ddesktop\x2dintegration-315.mount +● ├─snap-telegram\x2ddesktop-6691.mount +● ├─snap-telegram\x2ddesktop-6798.mount +● ├─snap.cups.cups-browsed.service +● ├─snap.cups.cupsd.service 
+● ├─snapd.apparmor.service +○ ├─snapd.autoimport.service +○ ├─snapd.core-fixup.service +○ ├─snapd.recovery-chooser-trigger.service +● ├─snapd.seeded.service +● ├─snapd.service +○ ├─ssl-cert.service +○ ├─sssd.service +● ├─sysstat.service +● ├─systemd-ask-password-wall.path +● ├─systemd-logind.service +● ├─systemd-oomd.service +○ ├─systemd-update-utmp-runlevel.service +● ├─systemd-user-sessions.service +○ ├─thermald.service +○ ├─ua-reboot-cmds.service +○ ├─ubuntu-advantage.service +● ├─ufw.service +● ├─unattended-upgrades.service +● ├─whoopsie.path +● ├─wpa_supplicant.service +● ├─basic.target +● │ ├─-.mount +○ │ ├─tmp.mount +● │ ├─paths.target +○ │ │ ├─apport-autoreport.path +○ │ │ └─tpm-udev.path +● │ ├─slices.target +● │ │ ├─-.slice +● │ │ └─system.slice +● │ ├─sockets.target +○ │ │ ├─apport-forward.socket +● │ │ ├─avahi-daemon.socket +● │ │ ├─cups.socket +● │ │ ├─dbus.socket +● │ │ ├─docker.socket +● │ │ ├─snapd.socket +● │ │ ├─systemd-initctl.socket +● │ │ ├─systemd-journald-dev-log.socket +● │ │ ├─systemd-journald.socket +● │ │ ├─systemd-oomd.socket +○ │ │ ├─systemd-pcrextend.socket +● │ │ ├─systemd-sysext.socket +● │ │ ├─systemd-udevd-control.socket +● │ │ ├─systemd-udevd-kernel.socket +● │ │ └─uuidd.socket +● │ ├─sysinit.target +● │ │ ├─apparmor.service +● │ │ ├─dev-hugepages.mount +● │ │ ├─dev-mqueue.mount +● │ │ ├─keyboard-setup.service +● │ │ ├─kmod-static-nodes.service +○ │ │ ├─ldconfig.service +● │ │ ├─plymouth-read-write.service +● │ │ ├─plymouth-start.service +● │ │ ├─proc-sys-fs-binfmt_misc.automount +● │ │ ├─setvtrgb.service +● │ │ ├─sys-fs-fuse-connections.mount +● │ │ ├─sys-kernel-config.mount +● │ │ ├─sys-kernel-debug.mount +● │ │ ├─sys-kernel-tracing.mount +○ │ │ ├─systemd-ask-password-console.path +● │ │ ├─systemd-binfmt.service +○ │ │ ├─systemd-firstboot.service +○ │ │ ├─systemd-hwdb-update.service +○ │ │ ├─systemd-journal-catalog-update.service +● │ │ ├─systemd-journal-flush.service +● │ │ ├─systemd-journald.service +○ │ │ 
├─systemd-machine-id-commit.service +● │ │ ├─systemd-modules-load.service +○ │ │ ├─systemd-pcrmachine.service +○ │ │ ├─systemd-pcrphase-sysinit.service +○ │ │ ├─systemd-pcrphase.service +○ │ │ ├─systemd-pstore.service +● │ │ ├─systemd-random-seed.service +○ │ │ ├─systemd-repart.service +● │ │ ├─systemd-resolved.service +● │ │ ├─systemd-sysctl.service +○ │ │ ├─systemd-sysusers.service +● │ │ ├─systemd-timesyncd.service +● │ │ ├─systemd-tmpfiles-setup-dev-early.service +● │ │ ├─systemd-tmpfiles-setup-dev.service +● │ │ ├─systemd-tmpfiles-setup.service +○ │ │ ├─systemd-tpm2-setup-early.service +○ │ │ ├─systemd-tpm2-setup.service +● │ │ ├─systemd-udev-trigger.service +● │ │ ├─systemd-udevd.service +○ │ │ ├─systemd-update-done.service +● │ │ ├─systemd-update-utmp.service +● │ │ ├─cryptsetup.target +● │ │ ├─integritysetup.target +● │ │ ├─local-fs.target +● │ │ │ ├─-.mount +● │ │ │ ├─boot-efi.mount +○ │ │ │ ├─systemd-fsck-root.service +● │ │ │ └─systemd-remount-fs.service +● │ │ ├─swap.target +● │ │ │ └─swap.img.swap +● │ │ └─veritysetup.target +● │ └─timers.target +● │ ├─anacron.timer +○ │ ├─apport-autoreport.timer +● │ ├─apt-daily-upgrade.timer +● │ ├─apt-daily.timer +● │ ├─dpkg-db-backup.timer +● │ ├─e2scrub_all.timer +● │ ├─fstrim.timer +● │ ├─fwupd-refresh.timer +● │ ├─logrotate.timer +● │ ├─man-db.timer +● │ ├─motd-news.timer +○ │ ├─snapd.snap-repair.timer +● │ ├─systemd-tmpfiles-clean.timer +○ │ ├─ua-timer.timer +● │ ├─update-notifier-download.timer +● │ └─update-notifier-motd.timer +● ├─getty.target +○ │ ├─getty-static.service +○ │ └─getty@tty1.service +● └─remote-fs.target +``` + +Observation: + +- The system depends on a large set of snap-related mount units. +- `multi-user.target` relies on essential services like `NetworkManager`, `docker.service`, `rsyslog`, and `ufw`. +- The dependency trees highlight the heavy reliance on snap and networking services. + +### Task 1.4 — User Sessions + +**1. 
Audit Login Activity** +```bash +anna@annaThinkBook ~> who -a + загрузка системы 2025-09-27 22:01 + уровень выполнения 5 2025-09-27 22:01 +anna ? seat0 2025-09-27 22:02 ? 2708 (login screen) +anna + tty2 2025-09-27 22:02 00:40 2708 (tty2) + pts/1 2025-09-27 22:05 5667 id=ts/1 терминал=0 выход=100 +``` + +```bash +anna@annaThinkBook ~> last -n 5 +anna tty2 tty2 Sat Sep 27 22:02 still logged in +anna seat0 login screen Sat Sep 27 22:02 still logged in +reboot system boot 6.11.0-26-generi Sat Sep 27 22:01 still running +anna tty2 tty2 Sat Sep 27 21:56 - down (00:04) +anna seat0 login screen Sat Sep 27 21:56 - down (00:04) + +wtmp begins Fri Jun 13 22:35:52 2025 +``` +Observation: + +- One active user (`anna`). +- Current login sessions: GNOME display manager and TTY2. +- Previous session ended at 21:56, then the system rebooted at 22:01. + +### Task 1.5 — User Sessions + +**1. Inspect Memory Allocation** + +```bash +anna@annaThinkBook ~> free -h + всего занят своб общая буф/врем. доступно +Память: 27Gi 5,7Gi 12Gi 369Mi 10Gi 21Gi +Подкачка: 8,0Gi 0B 8,0Gi +``` + +```bash +anna@annaThinkBook ~> cat /proc/meminfo | grep -e MemTotal -e SwapTotal -e MemAvailable +MemTotal: 28531952 kB +MemAvailable: 22621540 kB +SwapTotal: 8388604 kB +``` + +Observation: + +- Total RAM: ~27–28 GB. +- Available memory: ~21 GB (about 5.7 GB in use). +- Swap exists (8 GB) but is unused. +- Memory usage is efficient; the system has a large buffer/cache. + +### Overall Resource Utilization Patterns + +- Snapd and `dev-loop*` snap devices significantly increase boot time. +- The heaviest processes (by CPU and memory) are related to the GNOME desktop environment and user applications (`gnome-software`, `gnome-shell`, Telegram, Chromium, Firefox, VS Code). +- The system has plenty of RAM and does not rely on swap. +- Average system load is low (load average < 1). +- No major resource bottlenecks observed. + +## Task 2 — Networking Analysis + +### Task 2.1 — Network Path Tracing + +**1. 
Traceroute Execution** + +```bash +anna@annaThinkBook ~> traceroute github.com +traceroute to github.com (140.82.121.4), 30 hops max, 60 byte packets + 1 _gateway (192.168.X.X) 1.023 ms 1.024 ms 1.188 ms + 2 10.X.X.X (10.X.X.X) 2.273 ms 2.285 ms 3.534 ms + 3 10.X.X.X (10.X.X.X) 3.912 ms 4.972 ms 4.933 ms + 4 10.X.X.X (10.X.X.X) 4.897 ms 4.879 ms 4.824 ms + 5 isp-router (84.18.123.XXX) 16.132 ms 14.801 ms 16.436 ms + 6 isp-router (178.176.191.XXX) 9.544 ms 9.671 ms 10.138 ms + 7 * * * + 8 * * * + 9 * * * +10 * * * +11 transit-node (83.169.204.XXX) 101.652 ms transit-node (83.169.204.XXX) 101.646 ms 101.635 ms +12 netnod-ix (194.68.128.XXX) 101.613 ms netnod-ix (194.68.123.XXX) 101.642 ms netnod-ix (194.68.128.XXX) 101.868 ms +13 * * * +14 r3-ber1-de.as5405.net (94.103.180.XXX) 101.986 ms 101.976 ms 101.955 ms +15 * * * +16 * * * +17 * * * +18 * * * +19 r1-fra3-de.as5405.net (94.103.180.XXX) 103.267 ms 101.921 ms 103.291 ms +20 cust-sid436.fra3-de.as5405.net (45.153.82.XXX) 103.280 ms 102.139 ms cust-sid435.r1-fra3-de.as5405.net (45.153.82.XXX) 103.184 ms +21 * * * +22 * * * +23 * * * +24 * * * +25 * * * +26 * * * +27 * * * +28 * * * +29 * * * +30 * * * +``` + +Observation: + +- The path to GitHub starts from the local gateway (`192.168.X.X`) and traverses private addresses (`10.X.X.X`) before reaching ISP and exchange routers. +- Some hops are hidden (`* * *`), likely due to ICMP filtering. +- Latency rises from ~1 ms locally to ~100 ms on international exchange nodes. + +**2. DNS Resolution Check** + +```bash +anna@annaThinkBook ~> dig github.com +;; communications error to 127.0.0.53#53: timed out + +; <<>> DiG 9.18.30-0ubuntu0.24.04.2-Ubuntu <<>> github.com +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 29101 +;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 65494 +;; QUESTION SECTION: +;github.com. IN A + +;; ANSWER SECTION: +github.com. 
60 IN A 140.82.121.4 + +;; Query time: 179 msec +;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP) +;; WHEN: Sat Sep 27 23:19:09 MSK 2025 +;; MSG SIZE rcvd: 55 +``` + +Observation: + +- DNS successfully resolved `github.com` to 140.82.121.4. +- The local stub resolver at `127.0.0.53` handled the query. +- The TTL for this record is 60 seconds → indicates frequent updates for load balancing. + +### Task 2.2 — Network Path Tracing + +**1. Capture DNS Traffic** + +```bash +anna@annaThinkBook ~> sudo timeout 10 tcpdump -c 5 -i any 'port 53' -nn +tcpdump: data link type LINUX_SLL2 +tcpdump: verbose output suppressed, use -v[v]... for full protocol decode +listening on any, link-type LINUX_SLL2 (Linux cooked v2), snapshot length 262144 bytes +23:22:04.538418 lo In IP 127.0.0.1.54079 > 127.0.0.53.53: 46222+ [1au] A? github.com. (51) +23:22:04.538576 wlp4s0 Out IP 192.168.X.X.47134 > 192.168.X.1.53: 18312+ A? github.com. (28) +23:22:04.749912 wlp4s0 In IP 192.168.X.1.53 > 192.168.X.X.47134: 18312 1/0/0 A 140.82.121.3 (44) +23:22:04.750075 lo In IP 127.0.0.53.53 > 127.0.0.1.54079: 46222 1/0/1 A 140.82.121.3 (55) + +4 packets captured +6 packets received by filter +0 packets dropped by kernel + +``` + +Observation: + +- Example query: `192.168.X.X → 192.168.X.1:53 A? github.com`. +- Example response: `192.168.X.1:53 → 192.168.X.X A 140.82.121.3`. +- Shows the client querying a local DNS forwarder, which resolved GitHub’s IP. +- Private IPs are sanitized. + +### Task 2.3 — Reverse DNS + +**1. Perform PTR Lookups** + +```bash +anna@annaThinkBook ~> dig -x 8.8.4.4 + +; <<>> DiG 9.18.30-0ubuntu0.24.04.2-Ubuntu <<>> -x 8.8.4.4 +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 64282 +;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 65494 +;; QUESTION SECTION: +;4.4.8.8.in-addr.arpa. IN PTR + +;; ANSWER SECTION: +4.4.8.8.in-addr.arpa. 3658 IN PTR dns.google. 
+ +;; Query time: 182 msec +;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP) +;; WHEN: Sat Sep 27 23:23:27 MSK 2025 +;; MSG SIZE rcvd: 73 +``` + +Observation: + +- Reverse lookup for `8.8.4.4` resolves to dns.google. +- This confirms Google’s PTR record is properly configured. + +```bash +anna@annaThinkBook ~> dig -x 1.1.2.2 + +; <<>> DiG 9.18.30-0ubuntu0.24.04.2-Ubuntu <<>> -x 1.1.2.2 +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 60084 +;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 65494 +;; QUESTION SECTION: +;2.2.1.1.in-addr.arpa. IN PTR + +;; AUTHORITY SECTION: +1.in-addr.arpa. 479 IN SOA ns.apnic.net. read-txt-record-of-zone-first-dns-admin.apnic.net. 22952 7200 1800 604800 3600 + +;; Query time: 1367 msec +;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP) +;; WHEN: Sat Sep 27 23:23:56 MSK 2025 +;; MSG SIZE rcvd: 137 +``` + +Observation: + +- Reverse lookup for `1.1.2.2` failed (NXDOMAIN). +- The authority for this range is APNIC (`ns.apnic.net`), but no PTR record exists. + +### Comparison of Reverse Lookups + +- 8.8.4.4 → dns.google ✅ (PTR record exists, matches forward DNS). +- 1.1.2.2 → NXDOMAIN ❌ (no PTR record defined). +- This illustrates that not all IP addresses have reverse DNS mappings; it depends on whether the owner of the IP block configures PTR records. + +### Overall Insights + +- The traceroute revealed the path through local network, ISP, and IX nodes with some missing hops due to ICMP filtering. +- DNS queries show successful resolution of github.com with short TTLs, consistent with load-balanced infrastructure. +- Packet capture confirmed local DNS queries/answers, with sanitized internal IPs. +- Reverse lookups highlight the difference between a well-configured public resolver (Google) and an IP without PTR records (APNIC’s 1.1.2.2). 
\ No newline at end of file diff --git a/labs/submission5.md b/labs/submission5.md new file mode 100644 index 00000000..6e1aac06 --- /dev/null +++ b/labs/submission5.md @@ -0,0 +1,279 @@ +# Lab 5 — Virtualization & System Analysis + +## Task 1 — VirtualBox Installation +- **Host OS:** Windows 11 Home Single Language 24H2 +- **VirtualBox version:** 7.2.2 r170484 +- **Issues:** Installation completed successfully, no errors encountered + +## Task 2 — Ubuntu VM and System Analysis + +### VM Specs +- RAM: 4 GB (4096 MB) +- CPU: 2 cores +- Storage: 25 GB + +![VM OS](https://github.com/user-attachments/assets/97d95567-f9e5-47f7-9af1-b72e89d7a169) + +--- + +### CPU Information +**Tools used:** `lscpu`, `cat /proc/cpuinfo` + +**Command:** `lscpu` + +**Output:** +``` +Architecture: x86_64 + CPU op-mode(s): 32-bit, 64-bit + Address sizes: 48 bits physical, 48 bits virtual + Byte Order: Little Endian +CPU(s): 2 + On-line CPU(s) list: 0,1 +Vendor ID: AuthenticAMD + Model name: AMD Ryzen 5 3500U with Radeon Vega Mobile Gfx + CPU family: 23 + Model: 24 + Thread(s) per core: 1 + Core(s) per socket: 2 + Socket(s): 1 + Stepping: 1 + BogoMIPS: 4192.00 + Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr ss + e sse2 ht syscall nx mmxext fxsr_opt rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid + extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsa + ve avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowpref + etch ssbd vmmcall fsgsbase bmi1 avx2 bmi2 rdseed adx clflushopt sha_ni arat +Virtualization features: + Hypervisor vendor: KVM + Virtualization type: full +Caches (sum of all): + L1d: 64 KiB (2 instances) + L1i: 128 KiB (2 instances) + L2: 1 MiB (2 instances) + L3: 8 MiB (2 instances) +NUMA: + NUMA node(s): 1 + NUMA node0 CPU(s): 0,1 +Vulnerabilities: + Gather data sampling: Not affected + Ghostwrite: Not affected + Indirect target selection: Not affected + 
Itlb multihit: Not affected + L1tf: Not affected + Mds: Not affected + Meltdown: Not affected + Mmio stale data: Not affected + Reg file data sampling: Not affected + Retbleed: Mitigation; untrained return thunk; SMT disabled + Spec rstack overflow: Vulnerable: Safe RET, no microcode + Spec store bypass: Not affected + Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization + Spectre v2: Mitigation; Retpolines; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Not affe + cted + Srbds: Not affected + Tsx async abort: Not affected +``` + +**Command:** `cat /proc/cpuinfo | head -20` + +**Output:** +``` +processor : 0 +vendor_id : AuthenticAMD +cpu family : 23 +model : 24 +model name : AMD Ryzen 5 3500U with Radeon Vega Mobile Gfx +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2096.000 +cache size : 512 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 2 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch ssbd vmmcall fsgsbase bmi1 avx2 bmi2 rdseed adx clflushopt sha_ni arat +``` + +--- + +### Memory Information +**Tools used:** `free`, `cat /proc/meminfo` + +**Command:** `free -h` + +**Output:** +``` + total used free shared buff/cache available +Mem: 3.8Gi 1.0Gi 2.2Gi 33Mi 891Mi 2.8Gi +Swap: 0B 0B 0B +``` + +**Command:** `cat /proc/meminfo | head -10` + +**Output:** +``` +MemTotal: 4010568 kB +MemFree: 2268352 kB +MemAvailable: 2925380 kB +Buffers: 34320 kB +Cached: 841824 kB +SwapCached: 0 kB +Active: 1251196 kB +Inactive: 212280 kB +Active(anon): 622080 kB +Inactive(anon): 0 kB +``` + +--- + +### Network 
Configuration + +**Tools used:** `ip`, `ifconfig` + +**Command:** `ip addr` + +``` +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host noprefixroute + valid_lft forever preferred_lft forever +2: enp0s3: mtu 1500 qdisc fq_codel state UP group default qlen 1000 + link/ether 08:00:27:9a:98:28 brd ff:ff:ff:ff:ff:ff + inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic noprefixroute enp0s3 + valid_lft 86229sec preferred_lft 86229sec + inet6 fd17:625c:f037:2:7bad:163b:2e66:6662/64 scope global temporary dynamic + valid_lft 86231sec preferred_lft 14231sec + inet6 fd17:625c:f037:2:a00:27ff:fe9a:9828/64 scope global dynamic mngtmpaddr + valid_lft 86231sec preferred_lft 14231sec + inet6 fe80::a00:27ff:fe9a:9828/64 scope link + valid_lft forever preferred_lft forever +``` + + +**Command:** `ifconfig` + +**Output:** + +``` +enp0s3: flags=4163 mtu 1500 + inet 10.0.2.15 netmask 255.255.255.0 broadcast 10.0.2.255 + inet6 fd17:625c:f037:2:7bad:163b:2e66:6662 prefixlen 64 scopeid 0x0 + inet6 fd17:625c:f037:2:a00:27ff:fe9a:9828 prefixlen 64 scopeid 0x0 + inet6 fe80::a00:27ff:fe9a:9828 prefixlen 64 scopeid 0x20 + ether 08:00:27:9a:98:28 txqueuelen 1000 (Ethernet) + RX packets 252 bytes 226398 (226.3 KB) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 234 bytes 28071 (28.0 KB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 + +lo: flags=73 mtu 65536 + inet 127.0.0.1 netmask 255.0.0.0 + inet6 ::1 prefixlen 128 scopeid 0x10 + loop txqueuelen 1000 (Local Loopback) + RX packets 55 bytes 5962 (5.9 KB) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 55 bytes 5962 (5.9 KB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 +``` + +--- + +### Storage Information + +**Tools used:** `lsblk`, `df` + +**Command:** `lsblk` + +**Output:** +``` +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS +loop0 
7:0 0 73.9M 1 loop /snap/core22/2045 +loop1 7:1 0 4K 1 loop /snap/bare/5 +loop2 7:2 0 245.1M 1 loop /snap/firefox/6565 +loop3 7:3 0 11.1M 1 loop /snap/firmware-updater/167 +loop4 7:4 0 91.7M 1 loop /snap/gtk-common-themes/1535 +loop5 7:5 0 516M 1 loop /snap/gnome-42-2204/202 +loop6 7:6 0 10.8M 1 loop /snap/snap-store/1270 +loop7 7:7 0 49.3M 1 loop /snap/snapd/24792 +loop8 7:8 0 576K 1 loop /snap/snapd-desktop-integration/315 +sda 8:0 0 25G 0 disk +├─sda1 8:1 0 1M 0 part +└─sda2 8:2 0 25G 0 part / +sr0 11:0 1 50.7M 0 rom /media/vboxuser/VBox_GAs_7.2.2 +``` + + +**Command:** `df -h` + +**Output:** +``` +Filesystem Size Used Avail Use% Mounted on +tmpfs 392M 1.5M 391M 1% /run +/dev/sda2 25G 5.6G 18G 24% / +tmpfs 2.0G 0 2.0G 0% /dev/shm +tmpfs 5.0M 8.0K 5.0M 1% /run/lock +tmpfs 392M 120K 392M 1% /run/user/1000 +/dev/sr0 51M 51M 0 100% /media/vboxuser/VBox_GAs_7.2.2 +``` + +--- + +### Operating System + +**Tools used:** `lsb_release`, `uname` + +**Command:** `lsb_release -a` + +**Output:** +``` +No LSB modules are available. +Distributor ID: Ubuntu +Description: Ubuntu 24.04.3 LTS +Release: 24.04 +Codename: noble +``` +**Command:** `uname -a` + +**Output:** +``` +Linux Ubuntu-VM 6.14.0-33-generic #33~24.04.1-Ubuntu SMP PREEMPT_DYNAMIC Fri Sep 19 17:02:30 UTC 2 x86_64 x86_64 x86_64 GNU/Linux +``` + + +--- + +### Virtualization Detection + +**Tool used:** `systemd-detect-virt` + +**Command:** `systemd-detect-virt` + +**Output:** +``` +oracle +``` + +--- + +### Reflection +- The most useful commands: + - `lscpu` — clear summary of CPU info. + - `free -h` — easy way to check RAM usage. + - `ip addr` — modern tool for checking network interfaces and IPs. +- `lsblk` and `df -h` complement each other well for analyzing disk devices and usage. +- `systemd-detect-virt` confirms that the system is running inside a VM, which is essential for validation. + +--- + +## Conclusion +- VirtualBox installed and verified. +- Ubuntu 24.04.3 LTS successfully deployed in VM. 
+- Comprehensive system information collected and documented. diff --git a/labs/submission6.md b/labs/submission6.md new file mode 100644 index 00000000..f0634880 --- /dev/null +++ b/labs/submission6.md @@ -0,0 +1,279 @@ +# Lab 6 — Container Fundamentals with Docker + +## Task 1 — Container Lifecycle & Image Management + +### Output of `docker ps -a` and `docker images` + +**Command:** `docker ps -a` + +**Output:** +```bash +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +``` +(no existing containers) + +**Command:** `docker images ubuntu` + +**Output:** +```bash +REPOSITORY TAG IMAGE ID CREATED SIZE +ubuntu latest 59a458b76b4e 10 days ago 117MB +``` + +--- + +### Image size and layer count +- **Image size:** 117 MB +- **Layer count:** (implicit from the image’s digest and pull logs) — 1 main layer (`4b3ffd8ccb52: Pull complete`) plus metadata layers managed by Docker. + +--- + +### Tar file size comparison with image size +- **Exported TAR file:** 29 MB (`ubuntu_image.tar`) +- **Original image size:** 117 MB + +> The `.tar` file is smaller because it contains compressed filesystem layers without Docker’s metadata, cache, or uncompressed overlay data. + +--- + +### Error message from the first removal attempt +``` +Error response from daemon: conflict: unable to delete ubuntu:latest (must be forced) - container bea8a2154a86 is using its referenced image 59a458b76b4e +``` + +--- + +### Analysis: Why does image removal fail when a container exists? +When a container is created, it directly **depends on the image layers** it was built from. +Docker won’t allow you to delete an image that still has **active or stopped containers referencing it**, because doing so would break the container’s filesystem chain. + +--- + +### Explanation: What is included in the exported tar file? +The exported `.tar` file from `docker save` contains: +- All **filesystem layers** that make up the image. +- The **`manifest.json`** describing layer order and configuration. 
+- The **`repositories`** file mapping image names to their digests. +- The **`config.json`** with metadata (environment, entrypoint, etc.). + +It does **not** include: +- Containers +- Volumes +- Runtime state + +## Task 2 — Custom Image Creation & Analysis + +### Screenshot or output of original Nginx welcome page + + +```html + + + +Welcome to nginx! + + + +

<h1>Welcome to nginx!</h1>

+

<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>

+ +

<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>

+ +

<p><em>Thank you for using nginx.</em></p>

+ + +``` + +### Custom HTML content and verification via curl + +Custom index.html: + +```html + + +The best + + +

<h1>website</h1>

+ + +``` + +Verification: + +**Command:** `curl http://localhost` + +**Output:** + +```html + + +The best + + +

<h1>website</h1>

+ + +``` + +### Output of docker diff my_website_container +```bash +C /run +C /run/nginx.pid +C /etc +C /etc/nginx +C /etc/nginx/conf.d +C /etc/nginx/conf.d/default.conf +``` + +### Analysis +- `A` = Added — new file or directory created +- `C` = Changed — file or directory was modified +- `D` = Deleted — file or directory removed + +In this case, several configuration and runtime files inside `/run` and `/etc/nginx` were changed when Nginx started. + +These changes are expected because Nginx creates its PID file and updates configuration state during startup. + +### Reflection +`docker commit` is a quick way to capture the current state of a container and turn it into a reusable image. + +However, it lacks transparency and reproducibility — no clear record of what commands were run. + +Using a **Dockerfile** is more reliable for production: it provides version control, clear build steps, and easier automation in CI/CD. + +In short: + +- **docker commit** → fast, manual, non-reproducible. + +- **Dockerfile** → transparent, repeatable, professional. 
+ +## Task 3 — Container Networking & Service Discovery + +### Output of `ping` showing successful connectivity +**Command:** `docker exec container1 ping -c 3 container2` + +**Output:** + +```bash +PING container2 (172.18.0.3): 56 data bytes +64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.152 ms +64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.193 ms +64 bytes from 172.18.0.3: seq=2 ttl=64 time=0.198 ms + +--- container2 ping statistics --- +3 packets transmitted, 3 packets received, 0% packet loss +round-trip min/avg/max = 0.152/0.181/0.198 ms +``` + +--- + +### Network inspection output showing both containers' IP addresses + +**Command:** `docker network inspect lab_network` +**Output:** (partially) +```bash +"Containers": { + "1d021a8cafef2beadedb13cbf56cb7c04b6a3154e78dbc196ddc7cca03249b82" : { + "Name": "container1", + "IPv4Address": "172.18.0.2/16", + "IPv6Address": "" + }, + "eea7352108c8a900df0c68d42d03fcf5bf904e1fcb8af2b5bd5527e2ef986a07" : { + "Name": "container2", + "IPv4Address": "172.18.0.3/16", + "IPv6Address": "" + } +} +``` + +--- + +### DNS resolution output +**Command:** `docker exec container1 nslookup container2` + +**Output:** +```bash +Server: 127.0.0.11 +Address: 127.0.0.11:53 + +Non-authoritative answer: + +Non-authoritative answer: +Name: container2 +Address: 172.18.0.3 + +pgsql +``` + +--- + +### Analysis: How does Docker's internal DNS enable container-to-container communication by name? +- On user-defined bridge networks, Docker injects a lightweight DNS resolver inside containers at **127.0.0.11**. +- When a container joins the network, Docker registers its **container name** (and any network aliases) to its **IP** in that network. +- Other containers on the same network query 127.0.0.11; Docker returns an **A record** mapping the name (e.g., `container2`) to its IP (e.g., `172.18.0.3`). +- Records are updated dynamically when containers attach/detach, so name-based communication stays accurate without hardcoding IPs. 
+ +--- + +### Comparison: What advantages does user-defined bridge networks provide over the default bridge network? +- **Built-in DNS by name:** automatic name resolution (`container-name → IP`) without legacy `--link` tricks. +- **Isolation by design:** containers only see peers on the same user-defined network, reducing unintended cross-talk. +- **Configurable IPAM:** choose subnets/gateways and avoid clashes; predictable addressing for troubleshooting. +- **Aliases & multi-networking:** assign readable service aliases and connect a container to multiple networks cleanly. +- **Cleaner service discovery:** names remain stable across restarts, while IPs can change; apps don’t need IPs hardcoded. + +## Task 4 — Data Persistence with Volumes + +### Custom HTML content used +```html +

<h1>Persistent Data</h1>

+``` + +### Output of curl showing content persists after container recreation + +**Command:** `curl http://localhost` + +**Output:** + +```html +

<h1>Persistent Data</h1>

+``` + +### Volume inspection output showing mount point + +**Command:** `docker volume inspect app_data` + +**Output:** + +```json +[ + { + "CreatedAt": "2025-10-11T18:42:58Z", + "Driver": "local", + "Labels": null, + "Mountpoint": "/var/lib/docker/volumes/app_data/_data", + "Name": "app_data", + "Options": null, + "Scope": "local" + } +] +``` + +### Analysis: Why is data persistence important in containerized applications? + +Containers are disposable; data isn’t. Volumes keep state outside the container so restarts, updates, or crashes don’t wipe it. + +### Comparison: Explain the differences between volumes, bind mounts, and container storage. When would you use each? + +- **Volumes:** Docker-managed, portable, safe defaults. Use for persistent app/DB data. +- **Bind mounts:** Host folder mapped in. Use for local dev/live edits; mind host paths/permissions. +- **Container storage:** Ephemeral writable layer. Use only for temporary/cache data. \ No newline at end of file diff --git a/labs/submission7.md b/labs/submission7.md new file mode 100644 index 00000000..cd35085b --- /dev/null +++ b/labs/submission7.md @@ -0,0 +1,193 @@ +# Lab 7 — GitOps Fundamentals + +## Task 1 — Git State Reconciliation + +### Initial `desired-state.txt` and `current-state.txt` contents + +`desired-state.txt` +``` +version: 1.0 +app: myapp +replicas: 3 +``` + +`current-state.txt` +``` +version: 1.0 +app: myapp +replicas: 3 +``` + +### Output of drift detection and reconciliation + +```bash +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ ./reconcile.sh +Sat Oct 18 06:53:50 PM MSK 2025 - ⚠️ DRIFT DETECTED! +Reconciling current state with desired state... 
+Sat Oct 18 06:53:50 PM MSK 2025 - ✅ Reconciliation complete +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ diff desired-state.txt current-state.txt +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ cat current-state.txt +version: 1.0 +app: myapp +replicas: 3 +``` + +### Output showing synchronized state after reconciliation + +```bash +Every 5.0s: ./reconcile.sh zephyrus: Sat Oct 18 19:01:02 2025 + +Sat Oct 18 07:01:02 PM MSK 2025 - ✅ States synchronized +``` + + + +### Output from continuous reconciliation loop detecting auto-healing + +```bash +Every 5.0s: ./reconcile.sh zephyrus: Sat Oct 18 19:01:12 2025 + +Sat Oct 18 07:01:12 PM MSK 2025 - ⚠️ DRIFT DETECTED! +Reconciling current state with desired state... +Sat Oct 18 07:01:12 PM MSK 2025 - ✅ Reconciliation complete + +``` + +### Analysis: Explain the GitOps reconciliation loop. How does this prevent configuration drift? + +The reconciliation loop continuously compares the *desired state* (stored in `desired-state.txt`) with the *current state* (`current-state.txt`). +When drift occurs — meaning someone manually changes the running configuration — the script detects the difference and automatically restores the desired configuration by copying it over. + +This mechanism is the core of GitOps: the system constantly aligns the live environment with the version-controlled source of truth in Git. +As a result, configuration drift cannot persist for long — any unauthorized or accidental change is automatically corrected, keeping the system consistent and predictable. + +### Reflection: What advantages does declarative configuration have over imperative commands in production? + +A declarative configuration defines **what** the final state should be, rather than **how** to reach it. +In GitOps, all changes are versioned, reviewed, and auditable in Git, enabling reproducibility and easy rollback. +The system ensures the runtime environment always matches the declared configuration automatically. 
+ +In contrast, imperative commands require humans to execute steps manually, often leaving no trace of what was done and increasing the risk of drift or errors. +Declarative configuration makes infrastructure self-healing, maintainable, and safer for production environments. + +## Task 2 — GitOps Health Monitoring + + +### Contents of `healthcheck.sh` script + +`healthcheck.sh`: + +```bash +#!/bin/bash +# healthcheck.sh - Monitor GitOps sync health + +DESIRED_MD5=$(md5sum desired-state.txt | awk '{print $1}') +CURRENT_MD5=$(md5sum current-state.txt | awk '{print $1}') + +if [ "$DESIRED_MD5" != "$CURRENT_MD5" ]; then + echo "$(date) - ❌ CRITICAL: State mismatch detected!" | tee -a health.log + echo " Desired MD5: $DESIRED_MD5" | tee -a health.log + echo " Current MD5: $CURRENT_MD5" | tee -a health.log +else + echo "$(date) - ✅ OK: States synchronized" | tee -a health.log +fi +``` + +### Output showing "OK" status when states match + +```bash +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ ./healthcheck.sh +Sat Oct 18 07:45:19 PM MSK 2025 - ✅ OK: States synchronized +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ cat health.log +Sat Oct 18 07:45:19 PM MSK 2025 - ✅ OK: States synchronized +``` + +### Output showing "CRITICAL" status when drift is detected + +```bash +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ echo "unapproved-change: true" >> current-state.txt +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ ./healthcheck.sh +Sat Oct 18 07:45:54 PM MSK 2025 - ❌ CRITICAL: State mismatch detected! + Desired MD5: a15a1a4f965ecd8f9e23a33a6b543155 + Current MD5: 48168ff3ab5ffc0214e81c7e2ee356f5 +``` + +### Complete `health.log` file showing multiple checks + +```bash +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ cat health.log +Sat Oct 18 07:45:19 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:45:54 PM MSK 2025 - ❌ CRITICAL: State mismatch detected! 
+ Desired MD5: a15a1a4f965ecd8f9e23a33a6b543155 + Current MD5: 48168ff3ab5ffc0214e81c7e2ee356f5 +Sat Oct 18 07:46:12 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:03 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:06 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:09 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:12 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:15 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:18 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:21 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:24 PM MSK 2025 - ❌ CRITICAL: State mismatch detected! + Desired MD5: a15a1a4f965ecd8f9e23a33a6b543155 + Current MD5: 86c1e4f2cba0e303f72049ccbb3141bf +Sat Oct 18 07:47:27 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:30 PM MSK 2025 - ✅ OK: States synchronized +``` + +### Output from `monitor.sh` showing continuous monitoring + +```bash +belyak_anya@zephyrus:~/F25-DevOps-Intro/labs/lab7$ ./monitor.sh +Starting GitOps monitoring... 
+\n--- Check #1 --- +Sat Oct 18 07:47:03 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:03 PM MSK 2025 - ✅ States synchronized +\n--- Check #2 --- +Sat Oct 18 07:47:06 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:06 PM MSK 2025 - ✅ States synchronized +\n--- Check #3 --- +Sat Oct 18 07:47:09 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:09 PM MSK 2025 - ✅ States synchronized +\n--- Check #4 --- +Sat Oct 18 07:47:12 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:12 PM MSK 2025 - ✅ States synchronized +\n--- Check #5 --- +Sat Oct 18 07:47:15 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:15 PM MSK 2025 - ✅ States synchronized +\n--- Check #6 --- +Sat Oct 18 07:47:18 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:18 PM MSK 2025 - ✅ States synchronized +\n--- Check #7 --- +Sat Oct 18 07:47:21 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:21 PM MSK 2025 - ✅ States synchronized +\n--- Check #8 --- +Sat Oct 18 07:47:24 PM MSK 2025 - ❌ CRITICAL: State mismatch detected! + Desired MD5: a15a1a4f965ecd8f9e23a33a6b543155 + Current MD5: 86c1e4f2cba0e303f72049ccbb3141bf +Sat Oct 18 07:47:24 PM MSK 2025 - ⚠️ DRIFT DETECTED! +Reconciling current state with desired state... +Sat Oct 18 07:47:24 PM MSK 2025 - ✅ Reconciliation complete +\n--- Check #9 --- +Sat Oct 18 07:47:27 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:27 PM MSK 2025 - ✅ States synchronized +\n--- Check #10 --- +Sat Oct 18 07:47:30 PM MSK 2025 - ✅ OK: States synchronized +Sat Oct 18 07:47:30 PM MSK 2025 - ✅ States synchronized +``` + +### Analysis: How do checksums (MD5) help detect configuration changes? + +MD5 checksums allow instant detection of any modification in configuration files. +Even a single character change produces a completely different hash value. 
+By comparing the MD5 of the desired state and the current state, the script can quickly determine whether drift has occurred without manually checking file contents. +This makes the health check lightweight, reliable, and automation-friendly. + +### Comparison: How does this relate to GitOps tools like ArgoCD's "Sync Status"? + +ArgoCD uses a similar concept called **Sync Status** to monitor whether the live cluster configuration matches the desired state stored in Git: + +- **Synced** — The live state and the desired state are identical (same as “✅ OK” in this lab). +- **OutOfSync** — A difference is detected between Git and the live cluster (same as “❌ CRITICAL”). + +Our healthcheck and monitor scripts reproduce this behavior in a simplified shell-based simulation, continuously verifying and restoring configuration consistency. \ No newline at end of file diff --git a/labs/submission8.md b/labs/submission8.md new file mode 100644 index 00000000..a183ff10 --- /dev/null +++ b/labs/submission8.md @@ -0,0 +1,269 @@ +# Lab 8 — Site Reliability Engineering (SRE) + +## Task 1 — Key Metrics for SRE and System Analysis + +### 1.1 Monitoring System Resources + +#### 1. CPU and Memory Monitoring (htop) + +**Command:** `htop` + +**Screenshot:** + +![htop cpu screenshot](https://github.com/user-attachments/assets/d31317ac-2010-4899-bca0-d47d62c886e2) + +**Top 3 CPU-consuming processes:** +1. `/usr/bin/gnome-shell` — 10.2% +2. `/usr/lib/xorg/Xorg` — 5.1% +3. `jetbrains-toolbox` — 5.1% + +![htop memory screenshot](https://github.com/user-attachments/assets/be7451dd-6ef2-4b08-80a8-42bd53098341) + +**Top 3 memory-consuming processes:** +1. `firefox/...` — 3.7% +2. `firefox/...` — 3.7% +3. `firefox/...` — 3.7% + +**Analysis:** + +CPU load is moderate, mostly coming from the graphical environment (gnome-shell, Xorg) and background apps like JetBrains Toolbox. 
+Memory usage is dominated by several Firefox processes, each handling separate browser components such as tabs or rendering. +All active processes operate within normal limits, and the system remains stable and responsive. + +#### 2. Disk I/O Monitoring (iostat -x 1 5) + +**Command:** `iostat -x 1 5` + +**Excerpt from output:** + +```bash +avg-cpu: %user %nice %system %iowait %steal %idle + 2.58 0.03 1.25 0.03 0.00 96.12 + +Device r/s rkB/s rrqm/s %rrqm r_await rareq-sz w/s wkB/s wrqm/s %wrqm w_await wareq-sz d/s dkB/s drqm/s %drqm d_await dareq-sz f/s f_await aqu-sz %util +nvme0n1 19.70 982.49 1.58 7.42 0.22 49.88 14.90 566.25 15.52 51.02 1.15 38.02 3.84 104273.12 0.00 0.00 0.29 27142.48 1.17 0.27 0.02 0.48 + +``` + +**Top 3 devices by I/O utilization:** +1. `nvme0n1` — %util ≈ 0.48 +2. `loop10` — %util ≈ 0.01 +3. `loop6` — %util ≈ 0.01 + +**Analysis:** +Disk I/O load is minimal; the main storage device `nvme0n1` shows less than 1 % utilization. +Most loop devices are mounted snap packages with negligible activity. +No I/O bottlenecks were detected. + +### 1.2 Disk Space Management + +#### 1. Disk Usage Overview + +**Command:** `df -h` + +**Output:** + +``` +Filesystem Size Used Avail Use% Mounted on +tmpfs 3.1G 2.6M 3.1G 1% /run +/dev/nvme0n1p7 288G 28G 246G 10% / +tmpfs 16G 6.1M 16G 1% /dev/shm +tmpfs 5.0M 16K 5.0M 1% /run/lock +efivarfs 128K 59K 65K 48% /sys/firmware/efi/efivars +/dev/nvme0n1p1 256M 41M 216M 16% /boot/efi +tmpfs 3.1G 168K 3.1G 1% /run/user/1000 +``` + +**Interpretation:** +Root partition (`/`) is only 10 % used; available space is more than sufficient. +No immediate storage issues. + +#### 2. 
Largest Directories in /var + +**Command:** `du -h /var | sort -rh | head -n 10` + +**Output:** + +```bash +3.7G /var +2.8G /var/lib +2.5G /var/lib/snapd +1.3G /var/lib/snapd/snaps +1.2G /var/lib/snapd/seed/snaps +1.2G /var/lib/snapd/seed +621M /var/cache +573M /var/cache/apt +460M /var/cache/apt/archives +301M /var/log +``` + +**Analysis:** +Most disk usage comes from snap packages stored in `/var/lib/snapd`. +Caches (`/var/cache/apt`) and logs (`/var/log`) occupy moderate space and can be cleaned safely. + +3. **Largest Files in /var** + +**Command:** `sudo find /var -type f -exec du -h {} + | sort -rh | head -n 3` + +**Output:** + +```bash +517M /var/lib/snapd/snaps/gnome-42-2204_226.snap +517M /var/lib/snapd/seed/snaps/gnome-42-2204_202.snap +517M /var/lib/snapd/cache/c3c38b9039608c596b7174b23d37e6cd1bbd7b13dae28ec1a17a31df34bb5598a7f9f69c4171304c7abac9a73e9d2357 +``` + +**Interpretation:** +The three largest files are snap images, each ≈ 517 MB. These belong to GNOME snap packages. + +### Findings & Reflection + +**Observed patterns:** + +- CPU and memory usage are low — the system is mostly idle. +- Disk I/O activity is negligible, confirming that the system has no performance bottlenecks. +- The /var/lib/snapd directory dominates disk usage due to stored snap packages. + +**Optimization recommendations:** + +1. Remove old or unused snap package revisions: +```bash +sudo snap list --all | grep disabled +sudo snap remove --revision= +``` +2. Clean package cache and old logs: +```bash +sudo apt clean +sudo journalctl --vacuum-size=100M +``` +3. Continue periodic monitoring with htop and iostat to detect anomalies early. 
+ + +## Task 2 — Practical Website Monitoring Setup + +### 2.1 Target Website + +Website: https://innopolis.university/ + +### 2.2 API Check – Basic Availability + +- **Method:** GET +- **Assertion:** Status code equals 200 +- **Interval:** Every 2 minutes +- **Locations:** Frankfurt 🇩🇪 + +**Result:** ✅ Status 200, response time ~80 ms + +![API Check result](https://github.com/user-attachments/assets/727cc68e-6a48-403c-8db4-d8c0e01f9f39) + +![API timing](https://github.com/user-attachments/assets/2b0145ce-d460-4d9e-8d70-cf7bbf3e1c08) + +![API Check Scheduling](https://github.com/user-attachments/assets/4672f286-f051-4d4a-9392-da214cb8b858) + +### 2.3 Browser Check — Content & Interactions + +**Goal** + +Verify the “Информирование о приеме на обучение 2025” link in the Russian version of the Innopolis University site, ensuring that it correctly opens the admissions page in a new tab. + +**Playwright (Checkly) script** + +```js +const { expect, test } = require('@playwright/test') + +test.setTimeout(210000) +test.use({ actionTimeout: 10000 }) + +test('Check Innopolis University apply link', async ({ page, context }) => { + // 1. Visit homepage + const response = await page.goto('https://innopolis.university/') + expect(response.status(), 'Homepage should respond with a valid status code').toBeLessThan(400) + + // 2. Verify page title + await expect(page).toHaveTitle(/Университет Иннополис/i) + + // 3. Locate the “Информирование о приеме...” link + const applyLink = page.locator('text=Информирование о приеме на обучение 2025').first() + await expect(applyLink).toBeVisible({ timeout: 10000 }) + + // 4. Click the link and wait for new tab + const newPagePromise = context.waitForEvent('page', { timeout: 20000 }) + await applyLink.click() + const newPage = await newPagePromise + + // 5. Wait for the new tab to load + await newPage.waitForLoadState('domcontentloaded') + + // 6. Verify that new page has expected URL + await expect(newPage).toHaveURL(/apply/i) + + // 7. 
Screenshot the result + await newPage.screenshot({ path: 'innopolis_apply_page.png', fullPage: true }) +}) +``` + +**What this check validates** + +- The target text (“Информирование о приеме на обучение 2025”) exists and is visible. +- Clicking it successfully opens the admissions page in a new browser tab. +- The new page loads fully and contains /apply in the URL. +- The test records a full-page screenshot upon success. + +**Browser Check Results** + +- **Availability:** 100% +- **Median load (P50):** 10.85 s +- **P95:** 11.78 s +- **Errors:** 0 +- **Location:** Frankfurt 🇩🇪 + +![Browser Check](https://github.com/user-attachments/assets/36b55467-5bb0-46ff-a9d7-067734f5f213) + +**Analysis** + +The browser check simulates a real user path. It verifies that navigation works correctly and that content loads within acceptable latency limits. + +#### Screenshot — Dashboard + +![Dashboard](https://github.com/user-attachments/assets/09b013bb-a47e-49e7-825a-a338653e7a7b) + + +### 2.4 Alerts — Configuration and Reasoning + +Notification Channel — Email + +**Rules** + +- Trigger alert if a check is failing for more than 5 minutes +- Send 2 reminders, with 10-minute interval +- SSL expiration warning at 30 days + +**Subscribers** +- Browser Check — IU +- https://innopolis.university/ + +**Screenshots** + +![Email alert configuration](https://github.com/user-attachments/assets/9e24a131-d802-4955-9083-10ce0845b209) + +![Global alert rules](https://github.com/user-attachments/assets/a348fbf2-8976-4d25-88f0-961c80cdb3d8) + +**Rationale** +- A 5-minute window avoids false positives caused by minor network delays. +- Two reminders ensure persistent visibility for real downtime. +- SSL expiry notifications prevent loss of trust and service interruptions. + +### Analysis & Reflection + +**Why these checks were chosen** + +- API check monitors uptime and latency. +- Browser check validates interactive functionality and navigation, ensuring that the user journey remains intact. 
+- Alerts notify early without creating alert fatigue. + +**How this improves reliability** + +- Multi-region monitoring detects outages globally. +- Real browser runs catch front-end and content issues invisible to ping/API checks. +- Timely alerts allow proactive fixes and SLA compliance. +- Historical metrics (P50/P95) help detect gradual degradation before full failure. \ No newline at end of file diff --git a/labs/submission9.md b/labs/submission9.md new file mode 100644 index 00000000..b56aad97 --- /dev/null +++ b/labs/submission9.md @@ -0,0 +1,191 @@ +# Lab 9 — Introduction to DevSecOps Tools + +## Task 1 — Web Application Scanning with OWASP ZAP + +### Number of Medium risk vulnerabilities found + +The automated OWASP ZAP baseline scan was executed against Juice Shop running at http://172.17.0.1:3000. +The ZAP baseline log shows **7 WARN-NEW alerts** in total, corresponding to multiple instances of these **two Medium-risk vulnerability types**. + +### Medium-Risk Vulnerabilities + +1. **Content Security Policy (CSP) Header Not Set [10038]** + +- Risk: Medium (11 instances) +- Description: The application does not send a Content-Security-Policy header in HTTP responses. +Without this header, modern browsers cannot restrict which external scripts, images, or frames can be loaded. +- Impact: Increases exposure to Cross-Site Scripting (XSS) and data-injection attacks. +- Recommendation: Define and enable a strict CSP (e.g. default-src 'self') to limit allowed content sources. + +2. **Cross-Domain Misconfiguration [10098]** + +- Risk: Medium (11 instances) +- Description: The application allows or references resources from multiple domains without proper cross-origin controls. +- Impact: Potential leakage of sensitive information through insecure CORS or inclusion of malicious third-party scripts. +- Recommendation: Review CORS and Access-Control-Allow-Origin settings; restrict allowed origins to trusted domains only. 
+ +### Security Headers Status +| Header | Status | Description / Importance | +| ------------------------------- | ------------------------- | ------------------------------------------------------------------------------------ | +| **Content-Security-Policy** | ❌ Missing | Defines trusted sources for scripts and other resources to mitigate XSS. | +| **Strict-Transport-Security** | ❌ Missing | Forces HTTPS for future requests; prevents protocol downgrade attacks. | +| **Referrer-Policy** | ❌ Missing | Reduces leakage of internal URLs in the `Referer` header. | +| **Access-Control-Allow-Origin** | ✅ Present (`*`) | Allows all origins; convenient for dev but insecure for production. | +| **X-Content-Type-Options** | ✅ Present (`nosniff`) | Prevents browsers from MIME-sniffing content types. | +| **X-Frame-Options** | ✅ Present (`SAMEORIGIN`) | Protects against clickjacking attacks. | +| **Feature-Policy** | ⚠️ Present but deprecated | Controls browser features (camera, mic, payments); replaced by `Permissions-Policy`. | + +**Summary:** + +Some security headers (`X-Content-Type-Options`, `X-Frame-Options`) are correctly configured, but essential ones like **CSP**, **HSTS**, and **Referrer-Policy** are missing. +The `Access-Control-Allow-Origin: *` header allows any domain, which is unsafe in production because it opens cross-origin access. +Overall, the application lacks strong client-side security controls typical of modern hardened web servers. + +### 2 Most Interesting Vulnerabilities Found + +1. **Content Security Policy (CSP) Header Not Set [10038]** + +This issue is critical for client-side protection. The absence of a Content-Security-Policy header means the browser has no restrictions on which scripts, images, or frames can load. +If a malicious script is injected anywhere into the page, the browser will execute it without question. 
+A strict CSP (for example, `default-src 'self'; script-src 'self'`) drastically reduces the risk of XSS and data-injection attacks. + +2. **Dangerous JavaScript Functions [10110]** + +ZAP detected the presence of potentially unsafe functions such as `eval()` and `document.write()` inside `main.js` and `vendor.js`. +These functions are often exploited when user input is passed directly to them, allowing arbitrary JavaScript execution in the browser. +They should be replaced with safer alternatives or sandboxed carefully to avoid client-side code injection. + +### Screenshot + +![ZAP HTML report overview](https://github.com/user-attachments/assets/38a6e859-6031-4fd2-a349-4264be220d5b) + +### Analysis + +Web applications most frequently suffer from **misconfigured or missing security headers, overly permissive CORS policies, and unsafe client-side JavaScript practices.** +These weaknesses rarely break functionality, which is why developers often overlook them, but they form the base layer for common exploits like XSS, clickjacking, and information disclosure. +Regular automated scans with tools such as OWASP ZAP help identify these issues early in the development pipeline so they can be fixed before deployment. + +## Task 2 — Container Vulnerability Scanning with Trivy + +### Scan Summary + +A Trivy image scan was executed against the `bkimminich/juice-shop` image and the results were saved to `trivy.json`. The JSON was analyzed with `jq` to extract counts and top findings. + +**Key Findings** +- Total CRITICAL vulnerabilities: `8` +- Total HIGH vulnerabilities: `23` + +Commands used to obtain these counts: + +```bash +CRIT=$(jq '[.. | objects | select(has("Vulnerabilities")) | .Vulnerabilities[]? | select(.Severity=="CRITICAL")] | length' trivy.json) +HIGH=$(jq '[.. | objects | select(has("Vulnerabilities")) | .Vulnerabilities[]? 
| select(.Severity=="HIGH")] | length' trivy.json) +echo "CRITICAL: $CRIT" +echo "HIGH: $HIGH" + +``` + +### Example vulnerable packages (with CVE IDs) + +Below are specific vulnerable packages and CVE IDs extracted from `trivy.json` (unique list command shown in logs): +- `braces` — **CVE-2024-4068** +- `crypto-js` — **CVE-2023-46233** + +Additional notable package: + +- `jsonwebtoken` — appears **6** times in the high/critical findings. Associated identifiers in the scan include **CVE-2015-9235**, **CVE-2022-23539**, and `NSWG-ECO-17` (non-CVE advisory noted in the output). + +Commands used to list packages + CVEs + +```bash +jq -r ' + .. | objects | select(has("Vulnerabilities")) | .Vulnerabilities[]? + | select(.Severity=="HIGH" or .Severity=="CRITICAL") + | "\(.PkgName)\t\(.VulnerabilityID)" +' trivy.json | sort -u | head -n 10 +``` + +And the top vulnerable packages by count: + +```bash +jq -r ' + .. | objects | select(has("Vulnerabilities")) | .Vulnerabilities[]? + | select(.Severity=="HIGH" or .Severity=="CRITICAL") + | .PkgName +' trivy.json | sort | uniq -c | sort -nr | head +``` + +Output snippet: + +```bash +6 jsonwebtoken +4 multer +3 vm2 +3 lodash +... +``` + +### Most common vulnerability type (CVE category / attacker impact) + +From the `.Title` fields in the scan, the most recurring class of issues is **token verification / authentication bypasses** related to `jsonwebtoken` (titles like “Verification Bypass”, “nodejs-jsonwebtoken: verification step bypass with an altered token”, etc.). This indicates multiple high-impact issues in authentication/verification logic or in libraries used for token handling. + +How this was derived: + +```bash +jq -r ' + .. | objects | select(has("Vulnerabilities")) | .Vulnerabilities[]? 
+ | select(.Severity=="HIGH" or .Severity=="CRITICAL") + | .Title +' trivy.json | sed 's/ (.*//g' | sort | uniq -c | sort -nr | head +``` + +Top lines in output: + +```bash +2 Verification Bypass +2 nodejs-jsonwebtoken: verification step bypass with an altered token +2 jsonwebtoken: Unrestricted key type could lead to legacy keys usagen +... +``` + +### Screenshots + +![Command run](https://github.com/user-attachments/assets/2d250f71-67de-4552-9c5d-226375934502) + +![bkimminich/juice-shop](https://github.com/user-attachments/assets/3d19c338-bad5-48d2-bdbd-e5af3320088f) + +![Node.js (node-pkg)](https://github.com/user-attachments/assets/2862c2f3-001d-4e16-81e1-f6bd3821b099) + +### Analysis — Why container image scanning is important before production + +1. **Find issues early, reduce blast radius.** Images frequently contain transitive dependencies (OS libs, language packages). Scanning detects known CVEs before they reach production; fixing them in the build phase is far cheaper and safer than emergency patches in production. + +2. **Attack surface in supply chain.** A vulnerable library (e.g., token handling or sandbox escape) inside an image can lead to severe breaches: auth bypass, remote code execution, DoS. Scanning helps catch high-impact items like these. + +3. **Compliance and auditability.** Automated scans produce artifacts and records that security teams and auditors can review. + +4. **Risk-based prioritization.** Severity counts (CRITICAL / HIGH) let teams triage fixes effectively and block deployments when necessary. + +### Reflection — How to integrate Trivy scans into a CI/CD pipeline + +Practical, concrete steps to automate and enforce image security: + +1. **Scan during the Build Stage** + - Add a pipeline step after the image is built but before pushing to registry: + - `trivy image --format json -o trivy.json <image>` + - Save `trivy.json` as a pipeline artifact for reviewers. +2. 
**Fail on policy** + - Enforce a policy that fails the pipeline when CRITICAL vulnerabilities are present, and optionally when HIGH count > threshold. + - Example: fail if `CRITICAL > 0` OR `HIGH > 10` (tune thresholds to org risk appetite). +3. **Produce human-friendly reports** + - Convert Trivy JSON to HTML or table output and upload as build artifacts (so reviewers and security team can inspect easily). +4. **Annotate PRs / MR** + - Integrate the scanner with the VCS to post summary comments on Pull Requests (e.g., “3 CRITICAL, 7 HIGH — see artifact: trivy-report.html”). This gives developers immediate feedback. +5. **Use baselines and ignore lists carefully** + - Maintain a vulnerability baseline for legacy images and an allowlist for accepted risks (with ticketing and expiration). Avoid permanent silencing. +6. **Shift-left + automated PR remediation** + - Optionally, configure automated dependency bump PRs for fixable vulnerabilities (dependabot-like), and block merges until severity drops below policy. +7. **Periodic registry scanning** + - Schedule daily/weekly scans of images in the registry (not just built images) to catch newly published CVE data affecting previously clean images. +8. **Alerting & Escalation** + - Critical findings should trigger notifications to on-call/security team and open remediation tickets automatically. \ No newline at end of file