Add prepare_dataset.py script for private dataset extraction
This commit is contained in:
parent
f8c61be8da
commit
7c99609b1c
254
.gitignore
vendored
Normal file
254
.gitignore
vendored
Normal file
@ -0,0 +1,254 @@
|
||||
|
||||
# Created by https://www.toptal.com/developers/gitignore/api/python,pycharm+all
|
||||
# Edit at https://www.toptal.com/developers/gitignore?templates=python,pycharm+all
|
||||
|
||||
### PyCharm+all ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff
|
||||
.idea/**/workspace.xml
|
||||
.idea/**/tasks.xml
|
||||
.idea/**/usage.statistics.xml
|
||||
.idea/**/dictionaries
|
||||
.idea/**/shelf
|
||||
|
||||
# AWS User-specific
|
||||
.idea/**/aws.xml
|
||||
|
||||
# Generated files
|
||||
.idea/**/contentModel.xml
|
||||
|
||||
# Sensitive or high-churn files
|
||||
.idea/**/dataSources/
|
||||
.idea/**/dataSources.ids
|
||||
.idea/**/dataSources.local.xml
|
||||
.idea/**/sqlDataSources.xml
|
||||
.idea/**/dynamic.xml
|
||||
.idea/**/uiDesigner.xml
|
||||
.idea/**/dbnavigator.xml
|
||||
|
||||
# Gradle
|
||||
.idea/**/gradle.xml
|
||||
.idea/**/libraries
|
||||
|
||||
# Gradle and Maven with auto-import
|
||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||
# since they will be recreated, and may cause churn. Uncomment if using
|
||||
# auto-import.
|
||||
# .idea/artifacts
|
||||
# .idea/compiler.xml
|
||||
# .idea/jarRepositories.xml
|
||||
# .idea/modules.xml
|
||||
# .idea/*.iml
|
||||
# .idea/modules
|
||||
# *.iml
|
||||
# *.ipr
|
||||
|
||||
# CMake
|
||||
cmake-build-*/
|
||||
|
||||
# Mongo Explorer plugin
|
||||
.idea/**/mongoSettings.xml
|
||||
|
||||
# File-based project format
|
||||
*.iws
|
||||
|
||||
# IntelliJ
|
||||
out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Cursive Clojure plugin
|
||||
.idea/replstate.xml
|
||||
|
||||
# SonarLint plugin
|
||||
.idea/sonarlint/
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
# Editor-based Rest Client
|
||||
.idea/httpRequests
|
||||
|
||||
# Android studio 3.1+ serialized cache file
|
||||
.idea/caches/build_file_checksums.ser
|
||||
|
||||
### PyCharm+all Patch ###
|
||||
# Ignores the whole .idea folder and all .iml files
|
||||
# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
|
||||
|
||||
.idea/*
|
||||
|
||||
# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
|
||||
|
||||
*.iml
|
||||
modules.xml
|
||||
.idea/misc.xml
|
||||
*.ipr
|
||||
|
||||
# Sonarlint plugin
|
||||
.idea/sonarlint
|
||||
|
||||
### Python ###
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
# End of https://www.toptal.com/developers/gitignore/api/python,pycharm+all
|
25
.run/prepare_dataset (ulpressa).run.xml
Normal file
25
.run/prepare_dataset (ulpressa).run.xml
Normal file
@ -0,0 +1,25 @@
|
||||
<component name="ProjectRunConfigurationManager">
|
||||
<configuration default="false" name="prepare_dataset (ulpressa)" type="PythonConfigurationType"
|
||||
factoryName="Python">
|
||||
<module name="social-clusters"/>
|
||||
<option name="INTERPRETER_OPTIONS" value=""/>
|
||||
<option name="PARENT_ENVS" value="true"/>
|
||||
<envs>
|
||||
<env name="PYTHONUNBUFFERED" value="1"/>
|
||||
</envs>
|
||||
<option name="SDK_HOME" value="C:\Users\user\Projects\python\social-clusters\venv\Scripts\python.exe"/>
|
||||
<option name="SDK_NAME" value="Python 3.10 (social-clusters)"/>
|
||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$"/>
|
||||
<option name="IS_MODULE_SDK" value="false"/>
|
||||
<option name="ADD_CONTENT_ROOTS" value="true"/>
|
||||
<option name="ADD_SOURCE_ROOTS" value="true"/>
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/prepare_dataset.py"/>
|
||||
<option name="PARAMETERS" value="raw_dataset/ulpressa.json"/>
|
||||
<option name="SHOW_COMMAND_LINE" value="false"/>
|
||||
<option name="EMULATE_TERMINAL" value="false"/>
|
||||
<option name="MODULE_MODE" value="false"/>
|
||||
<option name="REDIRECT_INPUT" value="false"/>
|
||||
<option name="INPUT_FILE" value=""/>
|
||||
<method v="2"/>
|
||||
</configuration>
|
||||
</component>
|
24
.run/prepare_dataset (ultra).run.xml
Normal file
24
.run/prepare_dataset (ultra).run.xml
Normal file
@ -0,0 +1,24 @@
|
||||
<component name="ProjectRunConfigurationManager">
|
||||
<configuration default="false" name="prepare_dataset (ultra)" type="PythonConfigurationType" factoryName="Python">
|
||||
<module name="social-clusters"/>
|
||||
<option name="INTERPRETER_OPTIONS" value=""/>
|
||||
<option name="PARENT_ENVS" value="true"/>
|
||||
<envs>
|
||||
<env name="PYTHONUNBUFFERED" value="1"/>
|
||||
</envs>
|
||||
<option name="SDK_HOME" value="C:\Users\user\Projects\python\social-clusters\venv\Scripts\python.exe"/>
|
||||
<option name="SDK_NAME" value="Python 3.10 (social-clusters)"/>
|
||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$"/>
|
||||
<option name="IS_MODULE_SDK" value="false"/>
|
||||
<option name="ADD_CONTENT_ROOTS" value="true"/>
|
||||
<option name="ADD_SOURCE_ROOTS" value="true"/>
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/prepare_dataset.py"/>
|
||||
<option name="PARAMETERS" value="raw_dataset/ultra.json"/>
|
||||
<option name="SHOW_COMMAND_LINE" value="false"/>
|
||||
<option name="EMULATE_TERMINAL" value="false"/>
|
||||
<option name="MODULE_MODE" value="false"/>
|
||||
<option name="REDIRECT_INPUT" value="false"/>
|
||||
<option name="INPUT_FILE" value=""/>
|
||||
<method v="2"/>
|
||||
</configuration>
|
||||
</component>
|
Binary file not shown.
47
prepare_dataset.py
Normal file
47
prepare_dataset.py
Normal file
@ -0,0 +1,47 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from src.person import Person
|
||||
|
||||
|
||||
def __main(json_file_name):
    """Convert a raw VK JSON dump into an anonymized dataset file.

    Reads the raw JSON array of user profiles, drops closed and
    deactivated accounts, flattens the remaining Person records into a
    DataFrame and writes it as '<name>.private.json' in the working
    directory.

    :param json_file_name: path to the raw VK JSON dump
    :raises Exception: when no usable (open, active) profiles remain
    """
    # 'with' closes the file deterministically; the original leaked the handle.
    with open(json_file_name, encoding='utf-8') as json_file:
        data = json.load(json_file)

    persons = []
    for item in data:
        person = Person(item)
        # Closed or deactivated profiles expose no usable public data.
        if person.is_closed or person.deactivated:
            continue
        persons.append(person.__dict__)

    if not persons:
        raise Exception("No data")

    df = pd.DataFrame()
    # All persons share the same attribute set, so the first record's keys
    # define the columns.
    for key in persons[0].keys():
        df[key] = pd.Series([person[key] for person in persons])

    pathname, extension = os.path.splitext(json_file_name)
    filename = pathname.split('/')[-1]

    # Name the output after the source file (e.g. ulpressa -> ulpressa.private.json).
    # Fixed: the output name previously did not use the computed 'filename'.
    df.to_json(f'{filename}.private.json')
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Usage: prepare_dataset.py <raw_vk_dump.json>
    if len(sys.argv) != 2:
        print('You must specify the VK data in json')
        exit(1)
    if not os.path.isfile(sys.argv[1]):
        print(f'File {sys.argv[1]} does not exist')
        # Bug fix: previously execution fell through and called __main anyway.
        exit(1)
    __main(sys.argv[1])
|
7
readme.md
Normal file
7
readme.md
Normal file
@ -0,0 +1,7 @@
|
||||
Скрипт prepare_dataset используется для получения обезличенного датасета из данных, которые были загружены из ВК.
|
||||
|
||||
"Сырые" датасеты находятся в каталоге raw_dataset.
|
||||
|
||||
Была загружена информация из пабликов "Улпресса" и "Ultra".
|
||||
|
||||
Обезличенные датасеты находятся в корневом каталоге проекта.
|
1
requirements.txt
Normal file
1
requirements.txt
Normal file
@ -0,0 +1 @@
|
||||
pandas==2.0.1
|
14
src/career.py
Normal file
14
src/career.py
Normal file
@ -0,0 +1,14 @@
|
||||
from src.raw_data import RawData
|
||||
|
||||
|
||||
class Career:
    """Flat, serializable view of a single VK career entry."""

    def __init__(self, raw_career):
        fields = RawData(raw_career)
        self.id = fields.id
        self.group_id = fields.group_id
        self.company = fields.company
        self.country_id = fields.country_id
        self.city_id = fields.city_id
        # 'from' is a Python keyword, so it cannot be read as an attribute
        # of RawData and is taken from the raw dict directly.
        self.year_from = raw_career['from']
        self.until = fields.until
        self.position = fields.position
|
12
src/military.py
Normal file
12
src/military.py
Normal file
@ -0,0 +1,12 @@
|
||||
from src.raw_data import RawData
|
||||
|
||||
|
||||
class Military:
    """Flat, serializable view of a single VK military-service entry."""

    def __init__(self, raw_military):
        fields = RawData(raw_military)
        self.id = fields.id
        self.unit_id = fields.unit_id
        self.unit = fields.unit
        self.country_id = fields.country_id
        # 'from' is a Python keyword, so it cannot be read as an attribute
        # of RawData and is taken from the raw dict directly.
        self.year_from = raw_military['from']
        self.until = fields.until
|
63
src/person.py
Normal file
63
src/person.py
Normal file
@ -0,0 +1,63 @@
|
||||
from src.career import Career
|
||||
from src.military import Military
|
||||
from src.raw_data import RawData
|
||||
from src.school import School
|
||||
from src.university import University
|
||||
|
||||
|
||||
class Person:
    """Anonymized, flattened view of a raw VK user profile.

    The real VK id is replaced by a hash; nested structures (country,
    city, personal, occupation, universities, schools, career, military)
    are flattened into scalar fields or lists of plain dicts, so the
    instance __dict__ can be fed directly into a DataFrame.
    """

    def __init__(self, raw_person):
        data = RawData(raw_person)
        # De-identify: keep only a hash of the original VK id.
        self.id = hash(str(data.id))
        self.is_closed = data.is_closed
        # VK sends 'deactivated' only for banned/deleted profiles.
        self.deactivated = data.deactivated is not None
        self.has_photo = data.has_photo == 1
        self.followers_count = data.followers_count
        self.sex = data.sex
        self.bdate = data.get_date(data.bdate)
        self.relation = data.relation
        self.country_id = data.get_int(data.country, 'id')
        # Fixed: titles are strings, so use get_str ('' default) instead of
        # get_int (-1 default) to keep the column a single type.
        self.country = data.get_str(data.country, 'title')
        self.city_id = data.get_int(data.city, 'id')
        self.city = data.get_str(data.city, 'title')
        self.home_town = data.home_town
        self.status = data.status
        # For contact fields only their presence is kept, never the value.
        self.site = data.site is not None
        self.facebook = data.facebook is not None
        self.twitter = data.twitter is not None
        self.instagram = data.instagram is not None
        self.mobile_phone = data.mobile_phone is not None
        self.activities = data.activities
        self.interests = data.interests
        self.books = data.books
        self.movies = data.movies
        self.music = data.music
        self.tv = data.tv
        self.games = data.games
        self.quotes = data.quotes
        self.about = data.about
        # 'personal' arrives as a list; at most one entry is expected.
        personal = None
        if data.personal is not None:
            if len(data.personal) == 1:
                personal = data.personal[0]
            if len(data.personal) > 1:
                raise Exception(f'PERSONAL {data.id}')
        self.political = data.get_int(personal, 'political')
        self.religion = data.get_str(personal, 'religion')
        self.inspired_by = data.get_str(personal, 'inspired_by')
        self.people_main = data.get_int(personal, 'people_main')
        self.life_main = data.get_int(personal, 'life_main')
        self.smoking = data.get_int(personal, 'smoking')
        self.alcohol = data.get_int(personal, 'alcohol')
        # Relatives are reduced to their relation type only (anonymization).
        self.relatives = Person.__collection(data.relatives, lambda item: item['type'])
        self.occupation_type = data.get_str(data.occupation, 'type')
        self.occupation_place_id = data.get_int(data.occupation, 'id')
        self.occupation_place_name = data.get_str(data.occupation, 'name')
        self.universities = Person.__collection(data.universities, lambda item: University(item).__dict__)
        self.schools = Person.__collection(data.schools, lambda item: School(item).__dict__)
        self.career = Person.__collection(data.career, lambda item: Career(item).__dict__)
        self.military = Person.__collection(data.military, lambda item: Military(item).__dict__)

    @staticmethod
    def __collection(collection, function):
        """Map *function* over *collection*, treating None as empty."""
        # Simplified: the original wrapped 'function' in a redundant lambda.
        return [function(item) for item in ([] if collection is None else collection)]
|
31
src/raw_data.py
Normal file
31
src/raw_data.py
Normal file
@ -0,0 +1,31 @@
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class RawData:
    """Attribute-style wrapper over a raw VK JSON dict.

    Every key of the source dict becomes an instance attribute, plus
    static helpers for null-tolerant nested lookups and date parsing.
    """

    def __init__(self, data):
        # Expose every key of the raw dict as an instance attribute.
        self.__dict__.update(data)

    @staticmethod
    def get_str(value, attr):
        """Return value[attr], or '' when the container is None."""
        # Simplified: the original checked 'value is None' twice.
        return '' if value is None else value[attr]

    @staticmethod
    def get_int(value, attr):
        """Return value[attr], or -1 when the container is None."""
        # Simplified: the original checked 'value is None' twice.
        return -1 if value is None else value[attr]

    @staticmethod
    def get_date(value):
        """Parse a VK birth date ('DD.MM.YYYY' or 'DD.MM.YY').

        Returns a datetime.date, or None when the value is absent or
        matches neither format (a warning is printed in that case).
        """
        if value is None:
            return None
        # Try the 4-digit-year format first, then the 2-digit fallback.
        for fmt in ('%d.%m.%Y', '%d.%m.%y'):
            try:
                return datetime.strptime(value, fmt).date()
            except ValueError:
                continue
        print(f'Invalid date {value}')
        return None
|
18
src/school.py
Normal file
18
src/school.py
Normal file
@ -0,0 +1,18 @@
|
||||
from src.raw_data import RawData
|
||||
|
||||
|
||||
class School:
    """Flat, serializable view of a single VK school record."""

    def __init__(self, raw_school):
        fields = RawData(raw_school)
        self.id = fields.id
        self.name = fields.name
        self.country_id = fields.get_int(fields.country, 'id')
        self.city_id = fields.get_int(fields.city, 'id')
        self.year_from = fields.year_from
        self.year_to = fields.year_to
        self.year_graduated = fields.year_graduated
        self.speciality = fields.speciality
        self.type_id = fields.type
        self.type = fields.type_str
        # 'class' is a Python keyword, so it cannot be read as an attribute
        # of RawData and is taken from the raw dict directly.
        self.litera = raw_school['class']
        self.litera_id = fields.class_id
|
19
src/university.py
Normal file
19
src/university.py
Normal file
@ -0,0 +1,19 @@
|
||||
from src.raw_data import RawData
|
||||
|
||||
|
||||
class University:
    """Flat, serializable view of a single VK university record."""

    def __init__(self, raw_university):
        fields = RawData(raw_university)
        self.id = fields.id
        self.name = fields.name
        self.country_id = fields.get_int(fields.country, 'id')
        self.city_id = fields.get_int(fields.city, 'id')
        self.faculty_id = fields.faculty
        self.faculty = fields.faculty_name
        self.chair_id = fields.chair
        self.chair = fields.chair_name
        self.graduation = fields.graduation
        self.form_id = fields.education_form_id
        self.form = fields.education_form
        self.status_id = fields.education_status_id
        self.status = fields.education_status
|
1
ulpressa.private.json
Normal file
1
ulpressa.private.json
Normal file
File diff suppressed because one or more lines are too long
1
ultra.private.json
Normal file
1
ultra.private.json
Normal file
File diff suppressed because one or more lines are too long
Loading…
Reference in New Issue
Block a user