diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 13ae1b5..a2ea052 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -28,40 +28,40 @@ jobs: run: pip install pylint bandit - name: Code Errors Lint - run: pylint --errors-only demo.py - - - name: Code Linting report - id: lint - # Awk expression - replace newlines with chars in ORS. - # Or true so we get the report, but not gating. - run: | - linting_output="$(pylint demo.py 2>&1 | awk 1 ORS='\\n')" - echo -e "${linting_output}" - echo "::set-output name=LINTING::${linting_output//$'\\n'/'%0A'}" - - - name: Security linting report - id: security_lint - run: | - linting_output="$(bandit demo.py 2>&1 | awk 1 ORS='\\n')" - echo -e "${linting_output}" - echo "::set-output name=BANDIT::${linting_output//$'\\n'/'%0A'}" - - - name: Attach this as a comment - uses: mshick/add-pr-comment@v1 - with: - message: | - ## Linting report - - ``` - ${{ steps.lint.outputs.LINTING }} - ``` - - ## Security Report - - ``` - ${{ steps.security_lint.outputs.BANDIT }} - ``` - repo-token: ${{ secrets.GITHUB_TOKEN }} + run: pylint --errors-only app.py + + # - name: Code Linting report + # id: lint + # # Awk expression - replace newlines with chars in ORS. + # # Or true so we get the report, but not gating. + # run: | + # linting_output="$(pylint app.py 2>&1 | awk 1 ORS='\\n')" + # echo -e "${linting_output}" + # echo "::set-output name=LINTING::${linting_output//$'\\n'/'%0A'}" + # + # - name: Security linting report + # id: security_lint + # run: | + # linting_output="$(bandit app.py 2>&1 | awk 1 ORS='\\n')" + # echo -e "${linting_output}" + # echo "::set-output name=BANDIT::${linting_output//$'\\n'/'%0A'}" + # + # - name: Attach this as a comment + # uses: mshick/add-pr-comment@v1 + # with: + # message: | + # ## Linting report + # + # ``` + # ${{ steps.lint.outputs.LINTING }} + # ``` + # + # ## Security Report + # + # ``` + # ${{ steps.security_lint.outputs.BANDIT }} + # ``` + # repo-token: ${{ secrets.GITHUB_TOKEN }} build: needs: lint runs-on: ubuntu-latest diff --git a/Dockerfile b/Dockerfile index 666060b..0caaea3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,8 @@ # syntax=docker/dockerfile:1 -FROM python:3.8-slim-buster +FROM python:3.8-buster ENV PYTHONUNBUFFERED=1 ENV OBSDEMO_OTLP_ENDPOINT="NONE" ENV OBSDEMO_APP_SECRET="NONE" -ENV OTEL_PROPAGATORS=xray RUN apt update && apt upgrade -y WORKDIR /opt/app COPY requirements.txt /opt/app/ @@ -12,4 +11,4 @@ COPY . /opt/app EXPOSE 5000 -ENTRYPOINT ["/opt/app/demo.py"] +ENTRYPOINT ["python", "/opt/app/app.py"] diff --git a/GrafanaLokiLogs.png b/GrafanaLokiLogs.png new file mode 100644 index 0000000..4c0d205 Binary files /dev/null and b/GrafanaLokiLogs.png differ diff --git a/GrafanaLokiTempo.png b/GrafanaLokiTempo.png new file mode 100644 index 0000000..777e0de Binary files /dev/null and b/GrafanaLokiTempo.png differ diff --git a/GrafanaTempoMap.png b/GrafanaTempoMap.png new file mode 100644 index 0000000..00ba9e1 Binary files /dev/null and b/GrafanaTempoMap.png differ diff --git a/README.md b/README.md index fe8dfdc..7d4a2c4 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,36 @@ # observability-demo-app -A basic Python Flask application that randomly returns an HTTP error for 25% of queries. +A Python application to show the benefits of the [OpenTelemetry](https://www.opentelemetry.io) project. -By default, this works in AWS only and is configured to use X-Ray and Prometheus. 
+## Deployment
 
-Prometheus should scrape the `/metrics` endpoint of this application
+### Docker
 
-## Getting up and running
+You can set up this container by editing `docker.env` with the appropriate values and running the following command:
+
+```bash
+docker run --env-file docker.env -p 5000:5000 ghcr.io/contino/observability-demo-app:latest
+```
+
+Logs will be sent to STDOUT by default, so you'll want to capture these somehow and send them on to your logging engine such as [Loki](https://grafana.com/oss/loki/), [Vector](https://vector.dev/), [Elasticsearch](https://www.elastic.co), or [FluentD](https://www.fluentd.org).
+
+Traces will be sent to the OTLP endpoint specified in the environment file, and you can see the trace ID in the logs.
+
+### Running locally/without Docker
+
+If you don't want to use Docker, then you can follow these steps to get up and running:
 
 1. Create a virtualenv
 2. Activate the virtualenv
 3. Install the requirements: `pip install -r requirements.txt`
 4. Set the `OBSDEMO_OTLP_ENDPOINT` environment variable to point to your OTLP collector
 5. Set the `OBSDEMO_APP_SECRET` environment variable to the value you want to use for your app secret key
-6. Assuming you're running in AWS, set `OTEL_PROPAGATORS=xray` to send traces to XRay
 7. Alternatively, copy `.env.sample` to `.env`, update the values, and source it to setup the environment
-8. Run the app `python demo.py`
+8. Run the app `python app.py`
+
+## Accessing the App
 
-The app is now available at [http://localhost:5000/](http://localhost:5000/), with metrics at [http://localhost:5000/metrics](http://localhost:5000/metrics).
+Once you have run the deployment steps, the app will be available at [http://localhost:5000/](http://localhost:5000/), with metrics at [http://localhost:5000/metrics](http://localhost:5000/metrics).
 
 Point Prometheus at the `/metrics` endpoint, and then launch something like [apache bench](https://httpd.apache.org/docs/2.4/programs/ab.html) against the root:
 
@@ -27,14 +40,61 @@ ab -n 5000000 -c 5 http://localhost:5000/
 
 ## Dashboards
 
-Once the Prometheus data is flowing, you can hook Grafana up and use [this dashboard](https://grafana.com/grafana/dashboards/9688) to view the data.
+Once the Prometheus data is flowing, you can hook Grafana up and use [this dashboard](https://grafana.com/grafana/dashboards/9688) to view the data about the Flask application.
 
 ![The Grafana Dashboard](dashboard.png)
 
-## AWS X-Ray
+## Tracing Data
+
+Whilst the traces aren't anything particularly special, they will demonstrate the power of OpenTelemetry's tracing engine.
+
+### In Grafana
+
+Assuming that you have your log entries in Loki and traces in Tempo, you should be able to [set up a derived field](https://grafana.com/docs/grafana/latest/datasources/loki/#derived-fields) as follows:
+
+![Derived Field Configuration](derivedFields.png)
+
+**Note**: Because we are using an "internal" data source, we can leave the `query` field set to `${__value.raw}`; there is no need for an additional URL.
+
+Once this is set up, go to the `Explore` section and look at the logs for your app. You should see that the records that contain a `trace_id` field now have a link to Tempo:
+
+![Loki Logs showing the trace ID and a link to Tempo](GrafanaLokiLogs.png)
 
-Whilst the traces aren't anything particularly special, they will prove whether your X-Ray setup is working. 
+If you click on the "Tempo" button, a new split window will open on the right of your screen showing you the traces.
+
+![Split screen in Grafana showing Loki logs and Tempo Trace Data](GrafanaLokiTempo.png)
+
+Finally, if you close the log data split and then click on the `Node Graph (beta)` button, you should see a map of the application calling out to external URLs and running various queries against an in-memory SQLite database:
+
+![The Tempo Node Graph Panel](GrafanaTempoMap.png)
+
+Unfortunately it is not possible at present to include this map in a dashboard due to [Grafana/43201](https://github.com/grafana/grafana/issues/43201), so if you want to view a map then you need to view the data in `Explore` mode. We're hoping this can be fixed in due course!
+
+
+
+### In AWS X-Ray
 
 Make sure that [AWS OpenTelemetry Collector](https://aws-otel.github.io/docs/getting-started/collector) is running somewhere, then update the `OBSDEMO_OTEL_ENDPOINT` to point to that location. As long as your IAM policies are correct, your traces should start to show up in X-Ray
 
 ![The AWS X-Ray Dashboard](xray.png)
+
+### Grafana Tempo
+
+Ensure that the [OTLP Collector Distributor Configuration](https://grafana.com/docs/tempo/latest/configuration/#distributor) includes at least the following lines:
+
+```yaml
+# Distributor config block
+distributor:
+
+  # receiver configuration for different protocols
+  # config is passed down to opentelemetry receivers
+  # for a production deployment you should only enable the receivers you need!
+  receivers:
+    otlp:
+      protocols:
+        http:
+```
+
+Update the `OBSDEMO_OTLP_ENDPOINT` environment variable to point to your Tempo URL. Make sure you add `/v1/traces` to the end of the URL, otherwise it won't work!
+
+Once you start the app, you should be able to take a trace ID from the logs and use this in Grafana to view the trace.
diff --git a/app.py b/app.py
new file mode 100755
index 0000000..d3bd11c
--- /dev/null
+++ b/app.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+""" Observability Demo App """
+import logging
+from logging.config import dictConfig
+import os
+import time
+import sqlite3
+import random
+
+
+import requests
+
+from flask import Flask, abort, session, g, request
+from prometheus_flask_exporter import PrometheusMetrics
+# Add imports for OTel components into the application
+
+from opentelemetry.instrumentation.flask import FlaskInstrumentor
+from opentelemetry.instrumentation.requests import RequestsInstrumentor
+from opentelemetry.instrumentation.sqlite3 import SQLite3Instrumentor
+from opentelemetry import trace
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.instrumentation.logging import LoggingInstrumentor
+
+# Set up a placeholder for the request start time
+REQUEST_START_TIME = "requestStartTime"
+# Send generated traces in OTLP format to the collector endpoint (defaults to localhost:4317)
+otlp_endpoint = os.getenv("OBSDEMO_OTLP_ENDPOINT") or "localhost:4317"
+# A Resource can be required by some backends, e.g. Jaeger
+# If the resource is not set, traces will not appear in Jaeger
+resource = Resource(attributes={
+    "service.name": "observability-demo"
+})
+
+otlp_exporter = OTLPSpanExporter(endpoint=otlp_endpoint)
+trace.set_tracer_provider(TracerProvider(resource=resource))
+span_processor = BatchSpanProcessor(
+    OTLPSpanExporter(
+        endpoint=otlp_endpoint
+    )
+    )
+
+trace.get_tracer_provider().add_span_processor(span_processor)
+
+
+
+LOG_FORMAT = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [traceID=%(otelTraceID)s spanID=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s' # pylint: disable=line-too-long
+LoggingInstrumentor().instrument(
+    set_logging_format=True,
+    log_level=logging.DEBUG,
+    log_format=LOG_FORMAT
+    )
+
+app = Flask(__name__)
+app.secret_key=os.getenv("OBSDEMO_APP_SECRET")
+logger = logging.getLogger(__name__)
+logger.info(f"OTLP Configured and pointing to {os.getenv('OBSDEMO_OTLP_ENDPOINT')}")
+
+@app.before_request
+def before_request_func():
+    """ Set the session start time before each request """
+    session[REQUEST_START_TIME] = int(time.time() * 1_000)
+
+metrics = PrometheusMetrics(app)
+# Initialize instrumentors for the Flask app, outgoing requests, and sqlite3
+FlaskInstrumentor().instrument_app(app)
+RequestsInstrumentor().instrument()
+SQLite3Instrumentor().instrument()
+
+def get_db():
+    db = getattr(g, '_database', None)
+    if db is None:
+        db = g._database = sqlite3.connect("file::memory:?cache=shared", uri=True) # pylint: disable=E0237
+        cur = db.cursor()
+        cur.execute("CREATE TABLE IF NOT EXISTS obsdemo (timestamp integer, randomint integer)")
+    return db
+
+@app.teardown_appcontext
+def close_connect(exception):
+    db = getattr(g, '_database', None)
+    if db is not None:
+        db.close()
+
+# static information as metric
+metrics.info('app_info', 'Application info', version='1.0.3')
+
+tracer = trace.get_tracer(__name__)
+
+@app.route('/')
+def main():
+    """ The primary route of the application for the index page """
+    with tracer.start_as_current_span("index_request"):
+        chosen_return_code = random.randrange(0,4)
+        return_codes = [404, 401, 500, 502, 301]
+        if random.randrange(0, 100) > 95:
+            logger.info("Random error code selected")
+            abort(return_codes[chosen_return_code])
+        else:
+            logger.info("Returning valid response")
+            extreq = requests.get('http://127.0.0.1:5000/external_call')
+            intreq = requests.get('http://127.0.0.1:5000/internal_call')
+            text = "You should be seeing some spans now"
+
+    return text
+
+@app.route('/internal_call')
+def internal_call():
+    """ Internal route that runs a query against the in-memory SQLite database """
+    with tracer.start_as_current_span("index_request"):
+        logger.info("Internal Code called")
+        cur = get_db().cursor()
+        cur.execute("SELECT * FROM obsdemo")
+        cur.close()
+
+    return "Successfully called internal code"
+
+@app.route('/external_call')
+def external_call():
+    """ Route that makes an external HTTP call and writes a row to the database """
+    with tracer.start_as_current_span("index_request"):
+        logger.info("External Code called")
+        ext_site = requests.get("https://www.google.com/")
+        cur = get_db().cursor()
+        cur.execute(f"INSERT INTO obsdemo (timestamp, randomint) VALUES ({time.time()}, {random.randrange(0, 1000)})")
+        cur.close()
+
+    return "Successfully called Google.com"
+
+if __name__ == "__main__":
+    app.run(host="0.0.0.0")
diff --git a/demo.py b/demo.py
deleted file mode 100755
index 18822b0..0000000
--- a/demo.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#/usr/bin/env python
-""" Observability Demo App """
-import os
-import random
-import time
-from flask 
import Flask, abort, session -from prometheus_flask_exporter import PrometheusMetrics -# Add imports for OTel components into the application -from opentelemetry import trace -from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor -from opentelemetry import propagate -from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import AwsXRayFormat -from opentelemetry.instrumentation.botocore import BotocoreInstrumentor -from opentelemetry.instrumentation.flask import FlaskInstrumentor -# Import the AWS X-Ray for OTel Python IDs Generator into the application. -from opentelemetry.sdk.extension.aws.trace import AwsXRayIdGenerator -##### SET XRAY FORMAT #### -from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import ( - TRACE_ID_DELIMITER, - TRACE_ID_FIRST_PART_LENGTH, - TRACE_ID_VERSION, -) -# Sends generated traces in the OTLP format to an ADOT Collector running on port 55678 -otlp_endpoint = os.getenv("OBSDEMO_OTLP_ENDPOINT") or "localhost:4317" -otlp_exporter = OTLPSpanExporter(endpoint=otlp_endpoint, insecure=True) -# Processes traces in batches as opposed to immediately one after the other -span_processor = BatchSpanProcessor(otlp_exporter) -# Configures the Global Tracer Provider -trace.set_tracer_provider( - TracerProvider( - active_span_processor=span_processor, - id_generator=AwsXRayIdGenerator() - ) - ) - -propagate.set_global_textmap(AwsXRayFormat()) - -# Initialize instumentor for Botocore -BotocoreInstrumentor().instrument() - - - -DIMENSION_API_NAME = "apiName" -DIMENSION_STATUS_CODE = "statusCode" -REQUEST_START_TIME = "requestStartTime" - - -def convert_otel_trace_id_to_xray(otel_trace_id_decimal): - """ Convert the Open Telemetry Trace ID to an XRay Trace ID """ - otel_trace_id_hex = f"{otel_trace_id_decimal}" - x_ray_trace_id = TRACE_ID_DELIMITER.join( - [ - TRACE_ID_VERSION, - otel_trace_id_hex[:TRACE_ID_FIRST_PART_LENGTH], - otel_trace_id_hex[TRACE_ID_FIRST_PART_LENGTH:], - ] - ) - return f'{{"traceId": "{x_ray_trace_id}"}}' - -app = Flask(__name__) -app.secret_key=os.getenv("OBSDEMO_APP_SECRET") - - -@app.before_request -def before_request_func(): - """ Set the session start time before each request """ - session[REQUEST_START_TIME] = int(time.time() * 1_000) - -@app.after_request -def after_request_func(response): - """ After each request, return the response - - TODO: Implement this function so that it calls something else with the same TraceID - - """ - # if request.path == "/outgoing-http-call": - # apiBytesSentCounter.add( - # response.calculate_content_length() + mimicPayloadSize(), - # { - # DIMENSION_API_NAME: request.path, - # DIMENSION_STATUS_CODE: response.status_code, - # }, - # ) - - # apiLatencyRecorder.record( - # int(time.time() * 1_000) - session[REQUEST_START_TIME], - # { - # DIMENSION_API_NAME: request.path, - # DIMENSION_STATUS_CODE: response.status_code, - # }, - # ) - - return response - -metrics = PrometheusMetrics(app) -# Initialize instumentor for Flask web framework -FlaskInstrumentor().instrument_app(app) - -# static information as metric -metrics.info('app_info', 'Application info', version='1.0.3') - -tracer = trace.get_tracer(__name__) - -@app.route('/') -def main(): - """ The primary route of the application for the index page """ - with tracer.start_as_current_span("index_request"): - chosen_return_code = random.randrange(0,4) - return_codes = [404, 401, 500, 502, 301] 
- if random.randrange(0, 100) > 95: - abort(return_codes[chosen_return_code]) - else: - return app.make_response( - convert_otel_trace_id_to_xray( - trace.get_current_span().get_span_context().trace_id - ) - ) - -if __name__ == "__main__": - app.run(host="0.0.0.0") diff --git a/derivedFields.png b/derivedFields.png new file mode 100644 index 0000000..c810f0e Binary files /dev/null and b/derivedFields.png differ diff --git a/docker-compose-aws.yml b/docker-compose-aws.yml new file mode 100644 index 0000000..93af55f --- /dev/null +++ b/docker-compose-aws.yml @@ -0,0 +1,97 @@ +version: '3.7' + +volumes: + prometheus_data: {} + # grafana_data: {} + +networks: + front-tier: + back-tier: + +services: + + obsdemo: + image: ghcr.io/contino/observability-demo-app:pr-12 + ports: + - 5000:5000 + links: + - prometheus:prometheus + depends_on: + - prometheus + networks: + - back-tier + restart: always + + prometheus: + image: prom/prometheus:v2.26.0 + volumes: + - ./prometheus/:/etc/prometheus/ + - prometheus_data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + - '--web.console.templates=/usr/share/prometheus/consoles' + ports: + - 9090:9090 + links: + - cadvisor:cadvisor + depends_on: + - cadvisor + networks: + - back-tier + restart: always + + otelexporter: + image: public.ecr.aws/aws-observability/aws-otel-collector:latest + volumes: + - ./otel/:/etc/otel/ + command: + - '--config=/etc/otel/otel-local-config.yaml' + ports: + - 4317:4317 + - 55680:55680 + - 55681:55681 + - 8888:8888 + links: + - prometheus:prometheus + depends_on: + - prometheus + networks: + - back-tier + restart: always + + node-exporter: + image: prom/node-exporter + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + command: + - '--path.procfs=/host/proc' + - '--path.sysfs=/host/sys' + - --collector.filesystem.ignored-mount-points + - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)" + ports: + - 9100:9100 + networks: + - back-tier + restart: always + deploy: + mode: global + + cadvisor: + image: gcr.io/cadvisor/cadvisor + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + ports: + - 8080:8080 + networks: + - back-tier + restart: always + deploy: + mode: global + diff --git a/docker.env b/docker.env new file mode 100644 index 0000000..b254a04 --- /dev/null +++ b/docker.env @@ -0,0 +1,2 @@ +OBSDEMO_APP_SECRET="My Secret String" +OBSDEMO_OTLP_ENDPOINT=https://otlphttp/v1/traces diff --git a/otel/otel-local-config.yml b/otel/otel-local-config.yml new file mode 100644 index 0000000..44a2742 --- /dev/null +++ b/otel/otel-local-config.yml @@ -0,0 +1,35 @@ +extensions: + health_check: + pprof: + endpoint: 0.0.0.0:1777 + +receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:55681 + +processors: + batch: + +exporters: + logging: + loglevel: debug + awsxray: + region: 'eu-west-2' + awsemf: + region: 'eu-west-2' + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [awsxray] + metrics: + receivers: [otlp] + exporters: [awsemf] + + extensions: [pprof] + telemetry: + logs: + level: debug diff --git a/prometheus/prometheus.yml b/prometheus/prometheus.yml new file mode 100644 index 0000000..2e0a15f --- /dev/null +++ b/prometheus/prometheus.yml @@ -0,0 +1,68 @@ +global: + scrape_interval: 15s # Set the scrape 
interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + +remote_write: + - url: 'MY AWS PROMETHEUS REMOTE WRITE ENDPOINT' + sigv4: + region: 'MY REGION' + queue_config: + max_samples_per_send: 1000 + max_shards: 200 + capacity: 2500 + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['localhost:9090'] + + + - job_name: 'cadvisor' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + dns_sd_configs: + - names: + - 'tasks.cadvisor' + type: 'A' + port: 8080 + + - job_name: 'obsdemo' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + dns_sd_configs: + - names: + - 'tasks.obsdemo' + type: 'A' + port: 5000 + + - job_name: 'node-exporter' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + dns_sd_configs: + - names: + - 'tasks.node-exporter' + type: 'A' + port: 9100 + diff --git a/requirements.txt b/requirements.txt index dd442b4..beab51e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,46 +1,56 @@ -asgiref==3.4.1 +astroid==2.9.0 backoff==1.10.0 -boto3==1.18.9 -botocore==1.21.9 -certifi==2021.5.30 -charset-normalizer==2.0.3 -click==8.0.1 -Deprecated==1.2.12 -Flask==2.0.1 +certifi==2021.10.8 +charset-normalizer==2.0.9 +click==8.0.3 +Deprecated==1.2.13 +Flask==2.0.2 googleapis-common-protos==1.53.0 -grpcio==1.39.0 -idna==3.2 +greenlet==1.1.2 +grpcio==1.42.0 +idna==3.3 +isort==5.10.1 itsdangerous==2.0.1 -Jinja2==3.0.1 -jmespath==0.10.0 +Jinja2==3.0.3 +lazy-object-proxy==1.6.0 MarkupSafe==2.0.1 -opentelemetry-api==1.4.1 -opentelemetry-distro==0.23b2 -opentelemetry-exporter-otlp==1.4.1 -opentelemetry-exporter-otlp-proto-grpc==1.4.1 -opentelemetry-instrumentation==0.23b2 -opentelemetry-instrumentation-botocore==0.23b2 -opentelemetry-instrumentation-dbapi==0.23b2 -opentelemetry-instrumentation-flask==0.23b2 -opentelemetry-instrumentation-grpc==0.23b2 -opentelemetry-instrumentation-logging==0.23b2 -opentelemetry-instrumentation-requests==0.23b2 -opentelemetry-instrumentation-sqlite3==0.23b2 -opentelemetry-instrumentation-urllib==0.23b2 -opentelemetry-instrumentation-wsgi==0.23b2 -opentelemetry-proto==1.4.1 -opentelemetry-sdk==1.4.1 -opentelemetry-sdk-extension-aws==0.23b2 -opentelemetry-semantic-conventions==0.23b2 -opentelemetry-util-http==0.23b2 -prometheus-client==0.11.0 -prometheus-flask-exporter==0.18.2 -protobuf==3.17.3 -python-dateutil==2.8.2 +mccabe==0.6.1 +opentelemetry-api==1.7.1 +opentelemetry-exporter-otlp==1.7.1 +opentelemetry-exporter-otlp-proto-grpc==1.7.1 +opentelemetry-exporter-otlp-proto-http==1.7.1 +opentelemetry-instrumentation==0.26b1 +opentelemetry-instrumentation-aws-lambda==0.26b1 +opentelemetry-instrumentation-dbapi==0.26b1 +opentelemetry-instrumentation-flask==0.26b1 +opentelemetry-instrumentation-grpc==0.26b1 +opentelemetry-instrumentation-jinja2==0.26b1 +opentelemetry-instrumentation-logging==0.26b1 
+opentelemetry-instrumentation-requests==0.26b1 +opentelemetry-instrumentation-sqlite3==0.26b1 +opentelemetry-instrumentation-urllib==0.26b1 +opentelemetry-instrumentation-urllib3==0.26b1 +opentelemetry-instrumentation-wsgi==0.26b1 +opentelemetry-propagator-aws-xray==1.0.1 +opentelemetry-proto==1.7.1 +opentelemetry-sdk==1.7.1 +opentelemetry-semantic-conventions==0.26b1 +opentelemetry-util-http==0.26b1 +packaging==21.3 +platformdirs==2.4.0 +prometheus-client==0.12.0 +prometheus-flask-exporter==0.18.6 +protobuf==3.19.1 +pylint==2.12.2 +pyparsing==3.0.6 +python-json-logger==2.0.2 +python-logging-loki==0.3.1 requests==2.26.0 -s3transfer==0.5.0 +rfc3339==6.2 six==1.16.0 -urllib3==1.26.6 -Werkzeug==2.0.1 -wrapt==1.12.1 - +SQLAlchemy==1.4.27 +toml==0.10.2 +typing-extensions==4.0.1 +urllib3==1.26.7 +Werkzeug==2.0.2 +wrapt==1.13.3