max-conns specifies the maximum connection number for the connection pool.
test will replace an actual DB connection being created via the connection string,
with a mock DB for unit testing.
*/
diff --git a/conduit/plugins/exporters/postgresql/postgresql_exporter_test.go b/conduit/plugins/exporters/postgresql/postgresql_exporter_test.go
index c1bdd801..11c1b884 100644
--- a/conduit/plugins/exporters/postgresql/postgresql_exporter_test.go
+++ b/conduit/plugins/exporters/postgresql/postgresql_exporter_test.go
@@ -111,7 +111,7 @@ func TestUnmarshalConfigsContainingDeleteTask(t *testing.T) {
pgsqlExp := postgresqlExporter{}
ecfg := ExporterConfig{
ConnectionString: "",
- MaxConn: 0,
+ MaxConns: 0,
Test: true,
Delete: util.PruneConfigurations{
Rounds: 3000,
@@ -133,7 +133,7 @@ func TestUnmarshalConfigsContainingDeleteTask(t *testing.T) {
pgsqlExp := postgresqlExporter{}
cfg := ExporterConfig{
ConnectionString: "",
- MaxConn: 0,
+ MaxConns: 0,
Test: true,
Delete: util.PruneConfigurations{},
}
@@ -152,7 +152,7 @@ func TestUnmarshalConfigsContainingDeleteTask(t *testing.T) {
pgsqlExp := postgresqlExporter{}
cfg := ExporterConfig{
ConnectionString: "",
- MaxConn: 0,
+ MaxConns: 0,
Test: true,
Delete: util.PruneConfigurations{
Rounds: 1,
diff --git a/conduit/plugins/importers/all/all.go b/conduit/plugins/importers/all/all.go
index 7222c549..a6450a47 100644
--- a/conduit/plugins/importers/all/all.go
+++ b/conduit/plugins/importers/all/all.go
@@ -4,4 +4,5 @@ import (
// Call package wide init function
_ "github.com/algorand/conduit/conduit/plugins/importers/algod"
_ "github.com/algorand/conduit/conduit/plugins/importers/filereader"
+ _ "github.com/algorand/conduit/conduit/plugins/importers/noop"
)
diff --git a/conduit/plugins/importers/filereader/filereader_test.go b/conduit/plugins/importers/filereader/filereader_test.go
index b7e7e1dd..09a1ff57 100644
--- a/conduit/plugins/importers/filereader/filereader_test.go
+++ b/conduit/plugins/importers/filereader/filereader_test.go
@@ -40,8 +40,10 @@ func init() {
}
func TestDefaults(t *testing.T) {
- require.Equal(t, defaultEncodingFormat, filewriter.MessagepackFormat)
- require.Equal(t, defaultIsGzip, true)
+ format, gzip, err := filewriter.ParseFilenamePattern(filewriter.FilePattern)
+ require.NoError(t, err)
+ require.Equal(t, format, defaultEncodingFormat)
+ require.Equal(t, gzip, defaultIsGzip)
}
func TestImporterorterMetadata(t *testing.T) {
@@ -64,9 +66,7 @@ func initializeTestData(t *testing.T, dir string, numRounds int) sdk.Genesis {
Timestamp: 1234,
}
- genesisFilename := filewriter.GenesisFilename
-
- err := filewriter.EncodeToFile(path.Join(dir, genesisFilename), genesisA, filewriter.JSONFormat, false)
+ err := filewriter.EncodeToFile(path.Join(dir, filewriter.GenesisFilename), genesisA, filewriter.JSONFormat, false)
require.NoError(t, err)
for i := 0; i < numRounds; i++ {
diff --git a/conduit/plugins/importers/noop/noop_importer.go b/conduit/plugins/importers/noop/noop_importer.go
new file mode 100644
index 00000000..8fb429e3
--- /dev/null
+++ b/conduit/plugins/importers/noop/noop_importer.go
@@ -0,0 +1,81 @@
+package noop
+
+import (
+ "context"
+ _ "embed" // used to embed config
+ "fmt"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ sdk "github.com/algorand/go-algorand-sdk/v2/types"
+
+ "github.com/algorand/conduit/conduit/data"
+ "github.com/algorand/conduit/conduit/plugins"
+ "github.com/algorand/conduit/conduit/plugins/importers"
+)
+
+// PluginName to use when configuring.
+var PluginName = "noop"
+
+const sleepForGetBlock = 100 * time.Millisecond
+
+// `noopImporter`s will function without ever erroring. This means they will also process out of order blocks
+// which may or may not be desirable for different use cases--it can hide errors in actual importers expecting in order
+// block processing.
+// The `noopImporter` will maintain `Round` state according to the round of the last block it processed.
+// It also sleeps 100 milliseconds between blocks to slow down the pipeline.
+type noopImporter struct {
+ round uint64
+ cfg ImporterConfig
+}
+
+//go:embed sample.yaml
+var sampleConfig string
+
+var metadata = plugins.Metadata{
+ Name: PluginName,
+ Description: "noop importer",
+ Deprecated: false,
+ SampleConfig: sampleConfig,
+}
+
+func (imp *noopImporter) Metadata() plugins.Metadata {
+ return metadata
+}
+
+func (imp *noopImporter) Init(_ context.Context, _ data.InitProvider, cfg plugins.PluginConfig, _ *logrus.Logger) error {
+ if err := cfg.UnmarshalConfig(&imp.cfg); err != nil {
+ return fmt.Errorf("init failure in unmarshalConfig: %v", err)
+ }
+ imp.round = imp.cfg.Round
+ return nil
+}
+
+func (imp *noopImporter) Close() error {
+ return nil
+}
+
+func (imp *noopImporter) GetGenesis() (*sdk.Genesis, error) {
+ return &sdk.Genesis{}, nil
+}
+
+func (imp *noopImporter) GetBlock(rnd uint64) (data.BlockData, error) {
+ time.Sleep(sleepForGetBlock)
+ imp.round = rnd
+ return data.BlockData{
+ BlockHeader: sdk.BlockHeader{
+ Round: sdk.Round(rnd),
+ },
+ }, nil
+}
+
+func (imp *noopImporter) Round() uint64 {
+ return imp.round
+}
+
+func init() {
+ importers.Register(PluginName, importers.ImporterConstructorFunc(func() importers.Importer {
+ return &noopImporter{}
+ }))
+}
diff --git a/conduit/plugins/importers/noop/noop_importer_config.go b/conduit/plugins/importers/noop/noop_importer_config.go
new file mode 100644
index 00000000..f49964e5
--- /dev/null
+++ b/conduit/plugins/importers/noop/noop_importer_config.go
@@ -0,0 +1,7 @@
+package noop
+
+// ImporterConfig specific to the noop importer
+type ImporterConfig struct {
+ // Optionally specify the round to start on
+ Round uint64 `yaml:"round"`
+}
diff --git a/conduit/plugins/importers/noop/sample.yaml b/conduit/plugins/importers/noop/sample.yaml
new file mode 100644
index 00000000..a4e99563
--- /dev/null
+++ b/conduit/plugins/importers/noop/sample.yaml
@@ -0,0 +1,3 @@
+name: noop
+# noop has no config
+config:
diff --git a/e2e_tests/src/e2e_conduit/subslurp.py b/e2e_tests/src/e2e_conduit/subslurp.py
index 796982ce..97598394 100644
--- a/e2e_tests/src/e2e_conduit/subslurp.py
+++ b/e2e_tests/src/e2e_conduit/subslurp.py
@@ -7,7 +7,7 @@
logger = logging.getLogger(__name__)
# Matches conduit log output:
-# "UPDATED Pipeline NextRound=1337. FINISHED Pipeline round r=42 (13 txn) exported in 12.3456s"
+# "FINISHED Pipeline round r=42 (13 txn) exported in 12.3456s"
FINISH_ROUND: re.Pattern = re.compile(b"FINISHED Pipeline round r=(\d+)")
diff --git a/examples/Makefile b/examples/Makefile
new file mode 100644
index 00000000..30ac0406
--- /dev/null
+++ b/examples/Makefile
@@ -0,0 +1,10 @@
+CDATA = pypolars_data
+
+conduit: clean build
+ ../cmd/conduit/conduit -d $(CDATA)
+
+build:
+ cd .. && make
+
+clean:
+	rm -f $(CDATA)/metadata.json
diff --git a/go.mod b/go.mod
index 92a65b89..88d87d4f 100644
--- a/go.mod
+++ b/go.mod
@@ -2,6 +2,8 @@ module github.com/algorand/conduit
go 1.20
+replace github.com/algorand/indexer/v3 => github.com/algorand/indexer/v3 v3.1.1-0.20230905234129-74a692078e66
+
require (
github.com/algorand/go-algorand-sdk/v2 v2.2.0
github.com/algorand/go-codec/codec v1.1.10
diff --git a/go.sum b/go.sum
index 14295a01..28ea684d 100644
--- a/go.sum
+++ b/go.sum
@@ -49,8 +49,8 @@ github.com/algorand/go-algorand-sdk/v2 v2.2.0 h1:zWwK+k/WArtZJUSkDXTDj4a0GUik2iO
github.com/algorand/go-algorand-sdk/v2 v2.2.0/go.mod h1:+3+4EZmMUcQk6bgmtC5Ic5kKZE/g6SmfiW098tYLkPE=
github.com/algorand/go-codec/codec v1.1.10 h1:zmWYU1cp64jQVTOG8Tw8wa+k0VfwgXIPbnDfiVa+5QA=
github.com/algorand/go-codec/codec v1.1.10/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
-github.com/algorand/indexer/v3 v3.0.0 h1:FxQVt1KdwvJrKUAhJPeo+YAOygnJzgjKT8MUEawH+zc=
-github.com/algorand/indexer/v3 v3.0.0/go.mod h1:P+RpgLu0lR/6RT8ZwspLHBNKVeAMzwRfCSMVsfiwf40=
+github.com/algorand/indexer/v3 v3.1.1-0.20230905234129-74a692078e66 h1:Bl8WhnudNAoV+69+XWwadDRzPP+HY50qe2gbc9LRPAo=
+github.com/algorand/indexer/v3 v3.1.1-0.20230905234129-74a692078e66/go.mod h1:DZ4i4kpH8CJDlK2bqugSfC+FbVMdf81go7nDsqJpchI=
github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk=
github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
diff --git a/performance/.gitignore b/performance/.gitignore
new file mode 100644
index 00000000..becaa2ad
--- /dev/null
+++ b/performance/.gitignore
@@ -0,0 +1,3 @@
+conduit*.log
+pg.log*
+metadata.json
diff --git a/performance/Makefile b/performance/Makefile
new file mode 100644
index 00000000..2e42763b
--- /dev/null
+++ b/performance/Makefile
@@ -0,0 +1,121 @@
+random := $(shell echo $$RANDOM)
+PGCONT = performance_pg
+PGUSER = algorand
+PGDB = performance_db # postgres
+PGLOGS = pg.log
+CONDUIT = ../cmd/conduit/conduit
+PGCONN = postgresql://$(PGUSER):$(PGUSER)@localhost:65432/$(PGDB)
+
+echo:
+ @echo "random--->$(random)"
+ @echo "PGCONT--->$(PGCONT)"
+ @echo "PGUSER--->$(PGUSER)"
+ @echo "PGDB--->$(PGDB)"
+ @echo "PGLOGS--->$(PGLOGS)"
+ @echo "CONDUIT--->$(CONDUIT)"
+ @echo "PGCONN--->$(PGCONN)"
+
+nuke: pg-down clean
+ rm conduit_*.log || true
+ rm pg.log_* || true
+
+nuke-and-run: nuke perf-run
+
+nuke-and-debug: nuke pg-up
+ mv ../cmd/conduit/conduit.log "../cmd/conduit/conduit_$(random).log" || true
+
+perf-run: pg-up run-conduit
+
+clean: # pg-down should be called manually
+ rm conduit_data/metadata.json || true
+
+clean-go-cache:
+ cd .. && go clean -cache -testcache -modcache
+
+pg-up: save-logs
+ docker-compose up -d
+ sleep 5
+ make pg-logs-tail
+ docker exec -it $(PGCONT) psql -U algorand -d postgres -c "create database $(PGDB);"
+ make pg-query QUERY="-c \"CREATE EXTENSION pg_stat_statements;\""
+
+pg-down:
+ docker-compose down
+
+pg-logs-tail:
+ docker-compose logs -f > $(PGLOGS) &
+
+save-logs:
+ mv $(PGLOGS) "$(PGLOGS)_$(random)" || true
+ mv conduit.log "conduit_$(random).log" || true
+
+build:
+ cd .. && go mod tidy && make
+
+pg-enter:
+ docker exec -it $(PGCONT) psql -U algorand -d postgres
+
+run-conduit: build
+ $(CONDUIT) -d conduit_data
+
+# - for query hackery... prefer pg_stats.ipynb
+
+QUERY_COL = substring(trim(regexp_replace(regexp_replace(query, '--.*?$$', '', 'gn'), '\\s+', ' ', 'g')), 1, 100) AS query
+TOTAL_SECS_COL = round((total_exec_time/1000)::numeric, 3) AS tot_s
+MEAN_SECS_COL = round((mean_exec_time/1000)::numeric, 3) AS mean_s
+MIN_SECS_COL = round((min_exec_time/1000)::numeric, 3) AS min_s
+MAX_SECS_COL = round((max_exec_time/1000)::numeric, 3) AS max_s
+CPU_COL = round((100 * total_exec_time / sum(total_exec_time::numeric) OVER ())::numeric, 2) AS \"cpu%\"
+LIMIT = 15
+
+define QUERY_TOTAL_TIME
+-c "SELECT dbid, $(QUERY_COL), $(TOTAL_SECS_COL), calls, $(MEAN_SECS_COL), $(CPU_COL) \
+FROM pg_stat_statements \
+ORDER BY total_exec_time DESC \
+LIMIT $(LIMIT);"
+endef
+
+define QUERY_SLOWEST
+-c "SELECT dbid, $(QUERY_COL), calls, $(TOTAL_SECS_COL), $(MIN_SECS_COL), $(MAX_SECS_COL), $(MEAN_SECS_COL) \
+FROM pg_stat_statements \
+ORDER BY mean_exec_time DESC \
+LIMIT $(LIMIT);"
+endef
+
+define QUERY_MEMHOG
+-c "SELECT dbid, $(QUERY_COL), (shared_blks_hit+shared_blks_dirtied) as mem \
+FROM pg_stat_statements \
+ORDER BY (shared_blks_hit+shared_blks_dirtied) DESC \
+LIMIT $(LIMIT);"
+endef
+
+QUERY := -c "SELECT * FROM pg_stat_statements LIMIT 0;"
+pg-query:
+ psql $(PGCONN) $(QUERY)
+
+
+pg-txn-stats: QUERY=-c "SELECT max(round) AS max_round, count(*) AS txn_count from txn;"
+pg-txn-stats: pg-query
+
+pg-conn: QUERY=
+pg-conn: pg-query
+
+pg-stats:
+ make pg-stats-1
+ make pg-stats-2
+ make pg-stats-3
+
+pg-stats-1: QUERY=$(QUERY_TOTAL_TIME)
+pg-stats-1: pg-query
+
+pg-stats-2: QUERY=$(QUERY_SLOWEST)
+pg-stats-2: pg-query
+
+pg-stats-3: QUERY=$(QUERY_MEMHOG)
+pg-stats-3: pg-query
+
+pg-blocking-vac: QUERY=-c "VACUUM FULL ANALYZE;"
+pg-blocking-vac: pg-query
+
+pg-vac: QUERY=-c "VACUUM;"
+pg-vac: pg-query
diff --git a/performance/conduit_data/conduit.yml b/performance/conduit_data/conduit.yml
new file mode 100644
index 00000000..9fdf4ab8
--- /dev/null
+++ b/performance/conduit_data/conduit.yml
@@ -0,0 +1,28 @@
+log-level: debug
+log-file: conduit.log
+retry-count: 1
+retry-delay: "1s"
+#pid-filepath: /path/to/pidfile
+hide-banner: true
+metrics:
+ mode: ON
+ addr: ":9999"
+ prefix: "conduit"
+importer:
+ name: file_reader
+ config:
+ # assumes we run conduit inside of the `performance/` directory:
+ block-dir: "filereader_blocks"
+ filename-pattern: "%[1]d_block.msgp.gz"
+processors:
+exporter:
+ name: postgresql
+ config:
+ connection-string: "host=localhost user=algorand password=algorand dbname=performance_db port=65432 sslmode=disable"
+    max-conns: 20
+telemetry:
+ enabled: false
+ # uri: ""
+ # index: ""
+ # username: ""
+ # password: ""
diff --git a/performance/docker-compose.yml b/performance/docker-compose.yml
new file mode 100644
index 00000000..620c852a
--- /dev/null
+++ b/performance/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '3'
+
+services:
+ postgres:
+ image: postgres
+ container_name: performance_pg
+ ports:
+ - "65432:5432"
+ environment:
+ POSTGRES_PASSWORD: algorand
+ POSTGRES_USER: algorand
+ POSTGRES_DB: postgres
+ POSTGRES_HOST_AUTH_METHOD: trust
+ command:
+ -c shared_preload_libraries='pg_stat_statements'
+ # pg logs for the stress test with 10 blocks was 3 GB which
+ # interfered with performance measurements
+ # -c log_statement='all'
+ # -c log_duration=on
+ # -c log_min_duration_statement=0
diff --git a/performance/filereader_blocks/.gitignore b/performance/filereader_blocks/.gitignore
new file mode 100644
index 00000000..694556cb
--- /dev/null
+++ b/performance/filereader_blocks/.gitignore
@@ -0,0 +1 @@
+*.msgp.gz
diff --git a/performance/filereader_blocks/genesis.json b/performance/filereader_blocks/genesis.json
new file mode 100644
index 00000000..4d171267
--- /dev/null
+++ b/performance/filereader_blocks/genesis.json
@@ -0,0 +1,30 @@
+{
+ "alloc": [
+ {
+ "addr": "AEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKE3PRHE",
+ "comment": "",
+ "state": {
+ "algo": 1000000000000
+ }
+ },
+ {
+ "addr": "AIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGFFWAF4",
+ "comment": "",
+ "state": {
+ "algo": 1000000000000
+ }
+ },
+ {
+ "addr": "AMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANVWEXNA",
+ "comment": "",
+ "state": {
+ "algo": 1000000000000
+ }
+ }
+ ],
+ "fees": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVIOOBQA",
+ "id": "v1",
+ "network": "generated-network",
+ "proto": "future",
+ "rwd": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABFFF5B2Y"
+}
diff --git a/performance/pg_stats.ipynb b/performance/pg_stats.ipynb
new file mode 100644
index 00000000..24c78da6
--- /dev/null
+++ b/performance/pg_stats.ipynb
@@ -0,0 +1,3317 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datetime import datetime\n",
+ "import json\n",
+ "from pathlib import Path\n",
+ "import re\n",
+ "\n",
+ "import pandas as pd\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Constants"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CONDUIT_LOG=PosixPath('/Users/zeph/github/algorand/conduit/performance/conduit.log')\n"
+ ]
+ }
+ ],
+ "source": [
+ "CONDUIT_LOG = Path.cwd() / \"conduit.log\"\n",
+ "\n",
+ "PGCONN = \"postgresql://algorand:algorand@localhost:65432/performance_db\"\n",
+ "QUERY_CHARS = 1000\n",
+ "LIMIT = 15\n",
+ "\n",
+ "# Query columns\n",
+ "QUERY_COL = f\"substring(trim(regexp_replace(regexp_replace(query, '--.*?$', '', 'gn'), '\\\\s+', ' ', 'g')), 1, {QUERY_CHARS}) AS query\"\n",
+ "TOTAL_SECS_COL = \"round((total_exec_time/1000)::numeric, 3) AS tot_s\"\n",
+ "MEAN_SECS_COL = \"round((mean_exec_time/1000)::numeric, 3) AS mean_s\"\n",
+ "MIN_SECS_COL = \"round((min_exec_time/1000)::numeric, 3) AS min_s\"\n",
+ "MAX_SECS_COL = \"round((max_exec_time/1000)::numeric, 3) AS max_s\"\n",
+ "CPU_COL = \"round((100 * total_exec_time / sum(total_exec_time::numeric) OVER ())::numeric, 2) AS cpu_pct\"\n",
+ "\n",
+ "# Queries\n",
+ "QUERY_TOTAL_TIME = f\"\"\"SELECT dbid, {QUERY_COL}, {TOTAL_SECS_COL}, calls, {MEAN_SECS_COL}, {CPU_COL}\n",
+ "FROM pg_stat_statements\n",
+ "ORDER BY total_exec_time DESC\n",
+ "LIMIT {LIMIT}\"\"\"\n",
+ "\n",
+ "QUERY_SLOWEST = f\"\"\"SELECT dbid, {QUERY_COL}, calls, {TOTAL_SECS_COL}, {MIN_SECS_COL}, {MAX_SECS_COL}, {MEAN_SECS_COL}\n",
+ "FROM pg_stat_statements\n",
+ "ORDER BY mean_exec_time DESC\n",
+ "LIMIT {LIMIT}\"\"\"\n",
+ "\n",
+ "QUERY_MEMHOG = f\"\"\"SELECT dbid, {QUERY_COL}, (shared_blks_hit+shared_blks_dirtied) as mem\n",
+ "FROM pg_stat_statements\n",
+ "ORDER BY (shared_blks_hit+shared_blks_dirtied) DESC\n",
+ "LIMIT {LIMIT}\"\"\"\n",
+ "\n",
+ "print(f\"{CONDUIT_LOG=}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Parse the log\n",
+ "\n",
+ "## Overall"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Start Time: 2023-08-31 13:14:22.006342-05:00\n",
+ "Finish Time: 2023-08-31 13:15:29.770026-05:00\n",
+ "Log Rounds: 10\n",
+ "Total Export Time: 0:01:07.763684\n",
+ "Mean Export Time: 6.7763684 seconds\n"
+ ]
+ }
+ ],
+ "source": [
+ "with open(CONDUIT_LOG) as f:\n",
+ " log_content = f.read()\n",
+ "\n",
+ "lines = log_content.strip().split(\"\\n\")\n",
+ "\n",
+ "\n",
+ "# Regular expressions for extracting required data\n",
+ "start_time_pattern = re.compile(r'Block 1 read time')\n",
+ "finish_time_pattern = re.compile(r'round r=(\\d+) .* exported in')\n",
+ "time_pattern = re.compile(\n",
+ " r'(?P