Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .idea/.gitignore

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions .idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/vcs.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/xmljson.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions API_DZ.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
from itertools import groupby
from urllib.request import urlopen
from json import loads



# Revision history of the Russian Wikipedia article on Alexander Gradsky;
# the title is percent-encoded in the query string and rvlimit=500 fetches
# the 500 most recent revisions.
url = 'https://ru.wikipedia.org/w/api.php?action=query&format=json&prop=revisions&rvlimit=500&titles=%D0%93%D1%80%D0%B0%D0%B4%D1%81%D0%BA%D0%B8%D0%B9,_%D0%90%D0%BB%D0%B5%D0%BA%D1%81%D0%B0%D0%BD%D0%B4%D1%80_%D0%91%D0%BE%D1%80%D0%B8%D1%81%D0%BE%D0%B2%D0%B8%D1%87'
data = loads(urlopen(url).read().decode('utf8'))

# The API keys the result by numeric page id; take the single returned page
# instead of hard-coding the id ('183903' in the original).
page = next(iter(data['query']['pages'].values()))

# Group consecutive revisions by day (timestamp prefix YYYY-MM-DD) and print
# each run of same-day edits with its length.  A plain for-loop replaces the
# original list comprehension that was executed only for its print() side
# effect.
statistics = groupby(rev['timestamp'][:10] for rev in page['revisions'])
for day, revisions in statistics:
    print(day, len(list(revisions)))


7 changes: 7 additions & 0 deletions Report.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
## Александр Градский

![img.png](img.png)

## Жан-Поль Бельмондо

![img_1.png](img_1.png)
20 changes: 20 additions & 0 deletions correlation_DZ.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
from json import loads
from urllib.request import urlopen
from itertools import groupby


def date_of_death(gr):
    """Return the key of the longest consecutive run in *gr*.

    *gr* is an iterable of ``(key, group)`` pairs such as the one produced
    by :func:`itertools.groupby`.  Ties are resolved in favour of the run
    encountered first (``max`` keeps the first maximal element, matching
    the original strict ``>`` comparison).  Returns ``''`` when *gr* is
    empty.
    """
    counted = ((key, sum(1 for _ in run)) for key, run in gr)
    return max(counted, key=lambda pair: pair[1], default=('', 0))[0]


# Revision history of the Russian Wikipedia article on Jean-Paul Belmondo.
# NOTE(review): the original implicit string concatenation left a stray
# trailing space inside the URL; it is removed here.
url = ('https://ru.wikipedia.org/w/api.php?action=query&format=json&prop=revisions&rvlimit=500'
       '&titles=%D0%91%D0%B5%D0%BB%D1%8C%D0%BC%D0%BE%D0%BD%D0%B4%D0%BE,'
       '_%D0%96%D0%B0%D0%BD-%D0%9F%D0%BE%D0%BB%D1%8C')
data = loads(urlopen(url).read().decode('utf8'))

# Take the single page from the response instead of hard-coding its numeric
# id ('192203' in the original).
page = next(iter(data['query']['pages'].values()))

# Group consecutive revisions by day; the day with the longest burst of
# edits is taken as the date of death.
statistics = groupby(rev['timestamp'][:10] for rev in page['revisions'])
print(date_of_death(statistics))
Binary file added img.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added img_1.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
15 changes: 15 additions & 0 deletions making_json.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import json
import xml.etree.ElementTree as ET
from urllib.request import urlopen


def save_json(path, attributes):
    """Write *attributes* to *path* as human-readable UTF-8 JSON.

    Non-ASCII characters are kept verbatim (``ensure_ascii=False``) and the
    output is indented by three spaces, matching the original formatting.
    """
    serialized = json.dumps(attributes, ensure_ascii=False, indent=3)
    with open(path, 'w', encoding='utf-8') as sink:
        sink.write(serialized)


if __name__ == "__main__":
    # Fetch the Lenta.ru RSS feed (an XML document).
    data = urlopen('https://lenta.ru/rss').read().decode('utf8')
    # An RSS document is <rss><channel><item>…; look the channel up by tag
    # name instead of relying on it being the first child ([0] in the
    # original).
    channel = ET.fromstring(data).find('channel')
    # findtext() returns None instead of raising AttributeError when an
    # <item> lacks a <pubDate> or <title> child.
    save_json('news.json', [{'pubDate': item.findtext('pubDate'),
                             'title': item.findtext('title')}
                            for item in channel.findall('item')])
14 changes: 14 additions & 0 deletions making_json2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import json
import xml.etree.ElementTree as ET
from urllib.request import urlopen


def save_json(path, attributes):
    """Dump *attributes* into the file at *path* as indented UTF-8 JSON.

    The three-space indent and ``ensure_ascii=False`` (non-ASCII characters
    written verbatim) reproduce the original output byte-for-byte.
    """
    with open(path, mode='w', encoding='utf-8') as handle:
        json.dump(attributes, handle, indent=3, ensure_ascii=False)


if __name__ == "__main__":
    # Fetch the Lenta.ru RSS feed (an XML document).
    data = urlopen('https://lenta.ru/rss').read().decode('utf8')
    # Look the <channel> element up by name instead of relying on it being
    # the first child ([0] in the original).
    channel = ET.fromstring(data).find('channel')
    # One dict per <item>, keyed by child tag name.  NOTE(review): repeated
    # child tags (e.g. multiple <category>) collapse to the last occurrence
    # — same behaviour as the original dict comprehension.
    save_json('news2.json',
              [{child.tag: child.text for child in item}
               for item in channel.findall('item')])
Loading